content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
import numpy as np
from sklearn import neighbors
def knn_threshold(data, column, threshold=15, k=3):
"""
Cluster rare samples in data[column] with frequency less than
threshold with one of k-nearest clusters
Args:
data - pandas.DataFrame containing columns: latitude, longitude, column
column - the name of the column to threshold
threshold - the minimum sample frequency
k - the number of k-neighbors to explore when selecting cluster partner
"""
def ids_centers_sizes(data):
dat = np.array([(i, data.latitude[data[column]==i].mean(),
data.longitude[data[column]==i].mean(),
(data[column]==i).sum())
for i in set(list(data[column]))])
return dat[:,0], dat[:,1:-1].astype(float), dat[:,-1].astype(int)
knn = neighbors.NearestNeighbors(n_neighbors=k)
while True:
ids, centers, sizes = ids_centers_sizes(data)
asrt = np.argsort(sizes)
if sizes[asrt[0]] >= threshold:
break
cids = np.copy(ids)
knn.fit(centers)
for i in asrt:
if sizes[i] < threshold:
nearest = knn.kneighbors(centers[i])[1].flatten()
nearest = nearest[nearest != i]
sel = nearest[np.argmin(sizes[nearest])]
total_size = sizes[sel] + sizes[i]
data[column][data[column]==cids[i]] = cids[sel]
cids[cids==i] = cids[sel]
sizes[i] = total_size
sizes[sel] = total_size
return data | 37de2c0b4c14cdbb6a0dd10ee7ea1e270fe6ef56 | 22,495 |
def format_formula(formula):
"""Converts str of chemical formula into latex format for labelling purposes
Parameters
----------
formula: str
Chemical formula
"""
formatted_formula = ""
number_format = ""
for i, s in enumerate(formula):
if s.isdigit():
if not number_format:
number_format = "_{"
number_format += s
if i == len(formula) - 1:
number_format += "}"
formatted_formula += number_format
else:
if number_format:
number_format += "}"
formatted_formula += number_format
number_format = ""
formatted_formula += s
return r"$%s$" % (formatted_formula) | c3c87ffcdc5695b584892c643f02a7959b649935 | 22,497 |
def ParseQuery(query):
"""Parses the entire query.
Arguments:
query: The command the user sent that needs to be parsed.
Returns:
Dictionary mapping clause names to their arguments.
Raises:
bigquery_client.BigqueryInvalidQueryError: When invalid query is given.
"""
clause_arguments = {
'SELECT': [],
'AS': {},
'WITHIN': {},
'FROM': [],
'JOIN': [],
'WHERE': [],
'GROUP BY': [],
'HAVING': [],
'ORDER BY': [],
'LIMIT': [],
}
try:
_EBQParser(clause_arguments).parseString(query)
except ValueError as e:
raise bigquery_client.BigqueryInvalidQueryError(e, None, None, None)
return clause_arguments | b3348b10ec7aeb57916366b96409666b71c9a9ce | 22,498 |
def primary_astigmatism_00(rho, phi):
"""Zernike primary astigmatism 0°."""
return rho**2 * e.cos(2 * phi) | 031bb068b4384dc2cd15bebf3450faa25e0177bc | 22,499 |
def lpt_prototype(mesh,
nc=FLAGS.nc,
bs=FLAGS.box_size,
batch_size=FLAGS.batch_size,
a0=FLAGS.a0,
a=FLAGS.af,
nsteps=FLAGS.nsteps):
"""
Prototype of function computing LPT displacement.
Returns output tensorflow and mesh tensorflow tensors
"""
klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]
plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]
ipklin = iuspline(klin, plin)
stages = np.linspace(a0, a, nsteps, endpoint=True)
# Define the named dimensions
# Parameters of the small scales decomposition
n_block_x = FLAGS.nx
n_block_y = FLAGS.ny
n_block_z = 1
halo_size = FLAGS.hsize
if halo_size >= 0.5 * min(nc // n_block_x, nc // n_block_y, nc // n_block_z):
new_size = int(0.5 *
min(nc // n_block_x, nc // n_block_y, nc // n_block_z))
print('WARNING: REDUCING HALO SIZE from %d to %d' % (halo_size, new_size))
halo_size = new_size
# Parameters of the large scales decomposition
downsampling_factor = 0
lnc = nc // 2**downsampling_factor
#
fx_dim = mtf.Dimension("nx", nc)
fy_dim = mtf.Dimension("ny", nc)
fz_dim = mtf.Dimension("nz", nc)
tfx_dim = mtf.Dimension("tx", nc)
tfy_dim = mtf.Dimension("ty", nc)
tfz_dim = mtf.Dimension("tz", nc)
tx_dim = mtf.Dimension("tx_lr", nc)
ty_dim = mtf.Dimension("ty_lr", nc)
tz_dim = mtf.Dimension("tz_lr", nc)
nx_dim = mtf.Dimension('nx_block', n_block_x)
ny_dim = mtf.Dimension('ny_block', n_block_y)
nz_dim = mtf.Dimension('nz_block', n_block_z)
sx_dim = mtf.Dimension('sx_block', nc // n_block_x)
sy_dim = mtf.Dimension('sy_block', nc // n_block_y)
sz_dim = mtf.Dimension('sz_block', nc // n_block_z)
k_dims = [tx_dim, ty_dim, tz_dim]
batch_dim = mtf.Dimension("batch", batch_size)
pk_dim = mtf.Dimension("npk", len(plin))
pk = mtf.import_tf_tensor(mesh, plin.astype('float32'), shape=[pk_dim])
# Compute necessary Fourier kernels
kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)
kx = mtf.import_tf_tensor(mesh,
kvec[0].squeeze().astype('float32'),
shape=[tfx_dim])
ky = mtf.import_tf_tensor(mesh,
kvec[1].squeeze().astype('float32'),
shape=[tfy_dim])
kz = mtf.import_tf_tensor(mesh,
kvec[2].squeeze().astype('float32'),
shape=[tfz_dim])
kv = [ky, kz, kx]
# kvec for low resolution grid
kvec_lr = flowpm.kernels.fftk([nc, nc, nc], symmetric=False)
kx_lr = mtf.import_tf_tensor(mesh,
kvec_lr[0].squeeze().astype('float32'),
shape=[tx_dim])
ky_lr = mtf.import_tf_tensor(mesh,
kvec_lr[1].squeeze().astype('float32'),
shape=[ty_dim])
kz_lr = mtf.import_tf_tensor(mesh,
kvec_lr[2].squeeze().astype('float32'),
shape=[tz_dim])
kv_lr = [ky_lr, kz_lr, kx_lr]
shape = [batch_dim, fx_dim, fy_dim, fz_dim]
lr_shape = [batch_dim, fx_dim, fy_dim, fz_dim]
hr_shape = [batch_dim, nx_dim, ny_dim, nz_dim, sx_dim, sy_dim, sz_dim]
part_shape = [batch_dim, fx_dim, fy_dim, fz_dim]
# Begin simulation
initc = mtfpm.linear_field(mesh, shape, bs, nc, pk, kv)
# # Reshaping array into high resolution mesh
# field = mtf.slicewise(lambda x:tf.expand_dims(tf.expand_dims(tf.expand_dims(x, axis=1),axis=1),axis=1),
# [initc],
# output_dtype=tf.float32,
# output_shape=hr_shape,
# name='my_reshape',
# splittable_dims=lr_shape[:-1]+hr_shape[1:4]+part_shape[1:3])
#
state = mtfpm.lpt_init_single(
initc,
a0,
kv_lr,
halo_size,
lr_shape,
hr_shape,
part_shape[1:],
antialias=True,
)
# Here we can run our nbody
final_state = state #mtfpm.nbody(state, stages, lr_shape, hr_shape, k_dims, kv_lr, kv_hr, halo_size, downsampling_factor=downsampling_factor)
# paint the field
final_field = mtf.zeros(mesh, shape=hr_shape)
for block_size_dim in hr_shape[-3:]:
final_field = mtf.pad(final_field, [halo_size, halo_size],
block_size_dim.name)
final_field = mesh_utils.cic_paint(final_field, final_state[0], halo_size)
# Halo exchange
for blocks_dim, block_size_dim in zip(hr_shape[1:4], final_field.shape[-3:]):
final_field = mpm.halo_reduce(final_field, blocks_dim, block_size_dim,
halo_size)
# Remove borders
for block_size_dim in hr_shape[-3:]:
final_field = mtf.slice(final_field, halo_size, block_size_dim.size,
block_size_dim.name)
#final_field = mtf.reshape(final_field, [batch_dim, fx_dim, fy_dim, fz_dim])
# Hack using custom reshape because mesh is pretty dumb
final_field = mtf.slicewise(lambda x: x[:, 0, 0, 0], [final_field],
output_dtype=tf.float32,
output_shape=[batch_dim, fx_dim, fy_dim, fz_dim],
name='my_dumb_reshape',
splittable_dims=part_shape[:-1] + hr_shape[:4])
return initc, final_field
## | ab9dfc52ddc26a62f9c9bc0b62dec044d0262d79 | 22,500 |
def in_collision(box1: OrientedBox, box2: OrientedBox) -> bool:
"""
Check for collision between two boxes. First do a quick check by approximating each box with a circle,
if there is an overlap, check for the exact intersection using geometry Polygon
:param box1: Oriented box (e.g., of ego)
:param box2: Oriented box (e.g., of other tracks)
:return True if there is a collision between the two boxes.
"""
return bool(box1.geometry.intersects(box2.geometry)) if collision_by_radius_check(box1, box2) else False | 290c7de8b73ff31349ec020eb745209a28cdb460 | 22,501 |
def process_embedded_query_expr(input_string):
"""
This function scans through the given script and identify any path/metadata
expressions. For each expression found, an unique python variable name will
be generated. The expression is then substituted by the variable name.
:param str input_string: The input script
:return: A 2-element tuple of the substituted string and a dict of substitutions
:rtype: (str, dict)
"""
keep = []
state = ''
idx_char = idx_var = 0
substitutions = {} # keyed by query expression
query_expr = []
while idx_char < len(input_string):
c = input_string[idx_char]
if state == STATE_EMBEDDED_QUERY:
if c == '}':
state = STATE_IDLE
s = ''.join(query_expr).strip()
query_expr = []
if s not in substitutions:
varname = 'PBK_{}'.format(idx_var)
idx_var += 1
substitutions[s] = varname
else:
varname = substitutions[s]
keep.append(varname)
else:
query_expr.append(c)
elif (c == "'" or c == '"') and state != STATE_EMBEDDED_QUERY:
if state == c: # quoting pair found, pop it
state = STATE_IDLE
elif state == '': # new quote begins
state = c
keep.append(c)
elif c == '$' and state == STATE_IDLE: # an unquoted $
if idx_char + 1 < len(input_string) and input_string[idx_char + 1] == '{':
state = STATE_EMBEDDED_QUERY
# Once it enters the embedded query state, any pound sign and
# double/single quotes will be ignored
idx_char += 1
else:
keep.append(c)
elif c == '#' and state == STATE_IDLE:
state = STATE_COMMENT
keep.append(c)
elif c == '\n' and state == STATE_COMMENT:
state = STATE_IDLE
keep.append(c)
else:
keep.append(c)
idx_char += 1
return ''.join(keep), substitutions | 013c37c9fb63a447ac844d94c2a08f8b53fd759b | 22,502 |
def format_elemwise(vars_):
"""Formats all the elementwise cones for the solver.
Parameters
----------
vars_ : list
A list of the LinOp expressions in the elementwise cones.
Returns
-------
list
A list of LinLeqConstr that represent all the elementwise cones.
"""
# Create matrices Ai such that 0 <= A0*x0 + ... + An*xn
# gives the format for the elementwise cone constraints.
spacing = len(vars_)
prod_size = (spacing*vars_[0].size[0], vars_[0].size[1])
# Matrix spaces out columns of the LinOp expressions.
mat_size = (spacing*vars_[0].size[0], vars_[0].size[0])
terms = []
for i, var in enumerate(vars_):
mat = get_spacing_matrix(mat_size, spacing, i)
terms.append(lu.mul_expr(mat, var, prod_size))
return [lu.create_geq(lu.sum_expr(terms))] | 36cf91dc01549c4a2a01b4d301d387f002f8eee1 | 22,503 |
def extract_stars(image, noise_threshold):
"""
Extract all stars from the given image
Returns a list of rectangular images
"""
roi_list = []
image_list = []
# Threshold to remove background noise
image = image.copy()
image[image < noise_threshold] = 0.0
# Create binary image by thresholding
binary = image.copy()
binary[binary > 0] = 1
# Find the next white pixel in the image
i, j = find_next_while_pixel(binary)
while i is not None and j is not None:
# Construct the ROI around the pixel
i, j, w, h = construct_roi(binary, i, j)
# Save ROI to the list of ROIs
roi_list.append([i, j, w, h])
# Erase ROI from image
binary[i:i+h, j:j+w] = 0
# Extract image region
image_list.append(np.array(image[i:i+h, j:j+w]))
# Find the next white pixel and repeat
i, j = find_next_while_pixel(binary)
return np.array(roi_list), image_list | 3b252525d14a875ba96e66edead179096e62b1af | 22,504 |
import torch
import torch.nn.functional as F
from torch.autograd import Variable
def lovasz_hinge(logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)
"""
if len(labels) == 0:
# only void pixels, the gradients should be 0
return logits.sum() * 0.
signs = 2. * labels.float() - 1.
errors = (1. - logits * Variable(signs))
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), Variable(grad))
return loss | 07eae3d43fda67cb2c195c6f8f72774d99f3195d | 22,505 |
from urllib.parse import urljoin
from bs4 import BeautifulSoup
def extract_metadata(url: str, body: BeautifulSoup) -> Website:
"""
Extract metadata from a site and put it into a `Website object`.
"""
try:
name = body.title.get_text().strip()
except AttributeError:
name = url
try:
description = (
body.find(attrs={"name": "description"}).get("content").strip()
)
except AttributeError:
description = extract_text(body)[:400] + "..."
try:
icon = urljoin(url, body.find("link", rel="icon").get("href"))
except AttributeError:
# As Browsers do, if the html doesn't specify an icon we will just try
# the default path
icon = urljoin(url, "/favicon.ico")
return Website(
url,
name,
description,
icon,
) | 534ee50ee2a8daa39730f795cd4bfb16c1dacc1e | 22,506 |
def markContinuing(key, idea, oldest_idea_id, oldest_idea_detect_time, accum):
"""
Mark IDEA as continuing event.
:return: marked key, IDEA
"""
# If idea is present
if idea:
# If the oldest ID differs from this idea's ID, add the continuing mark
if oldest_idea_id != idea.id:
# Add {key: (ID, DetectTime)} to accumulator
accum.add(dict([(key, (oldest_idea_id, oldest_idea_detect_time))]))
# Add id mark for continuing event
idea.aida_continuing=oldest_idea_id
# Return tuple: key for next deduplication phase and IDEA
return (key[0:3], idea) | 3f83283f284693b0d0fdee7129fe0fa51b2a9174 | 22,507 |
import torch
def box1_in_box2(corners1:torch.Tensor, corners2:torch.Tensor):
"""check if corners of box1 lie in box2
Convention: if a corner is exactly on the edge of the other box, it's also a valid point
Args:
corners1 (torch.Tensor): (B, N, 4, 2)
corners2 (torch.Tensor): (B, N, 4, 2)
Returns:
c1_in_2: (B, N, 4) Bool
"""
a = corners2[:, :, 0:1, :] # (B, N, 1, 2)
b = corners2[:, :, 1:2, :] # (B, N, 1, 2)
d = corners2[:, :, 3:4, :] # (B, N, 1, 2)
ab = b - a # (B, N, 1, 2)
am = corners1 - a # (B, N, 4, 2)
ad = d - a # (B, N, 1, 2)
p_ab = torch.sum(ab * am, dim=-1) # (B, N, 4)
norm_ab = torch.sum(ab * ab, dim=-1) # (B, N, 1)
p_ad = torch.sum(ad * am, dim=-1) # (B, N, 4)
norm_ad = torch.sum(ad * ad, dim=-1) # (B, N, 1)
# NOTE: the expression looks ugly but is stable if the two boxes are exactly the same
# also stable with different scale of bboxes
cond1 = (p_ab / norm_ab > - 1e-6) * (p_ab / norm_ab < 1 + 1e-6) # (B, N, 4)
cond2 = (p_ad / norm_ad > - 1e-6) * (p_ad / norm_ad < 1 + 1e-6) # (B, N, 4)
return cond1*cond2 | f7c5e442aadfadd15dcfdd32c3358f784ac418bc | 22,508 |
def in_line_rate(line, container_line):
"""一个线段和另一个线段的重合部分,占该线段总长的占比"""
inter = intersection_line(line, container_line)
return inter / (line[1] - line[0]) | 3f56b05c0bbe42030c1fd6f684724c2afc922135 | 22,509 |
def test_cli_requires():
"""Test to ensure your can add requirements to a CLI"""
def requires_fail(**kwargs):
return {'requirements': 'not met'}
@hug.cli(output=str, requires=requires_fail)
def cli_command(name: str, value: int):
return (name, value)
assert cli_command('Testing', 1) == ('Testing', 1)
assert hug.test.cli(cli_command, 'Testing', 1) == {'requirements': 'not met'} | 2febbfa4ed51a22e057494dfaeb45c99400b72d4 | 22,510 |
def comm_for_pid(pid):
"""Retrieve the process name for a given process id."""
try:
return slurp('/proc/%d/comm' % pid)
except IOError:
return None | 49aa200986f3fcafd053e5708a08a4ff5873b40e | 22,511 |
def get_machine_type_from_run_num(run_num):
"""these are the values to be used in config for machine dependent settings"""
id_to_machine = {
'MS001': 'miseq',
'NS001': 'nextseq',
'HS001': 'hiseq 2500 rapid',
'HS002': 'hiseq 2500',
'HS003': 'hiseq 2500',
'HS004': 'hiseq 2500',
'HS005': 'macrogen',
'HS006': 'hiseq 4000',
'HS007': 'hiseq 4000',
'HS008': 'hiseq 4000',
'NG001': 'novogene hiseq x5',
'NG002': 'novogene hiseq x5',
'NG003': 'novogene hiseq x5',
'NG004': 'novogene hiseq x5',
'NG005': 'novogene hiseq x5',
}
machine_id = run_num.split('-')[0]
try:
machine_type = id_to_machine[machine_id]
except KeyError:
logger.critical("Unknown machine id %s", machine_id)
raise
return machine_type | 117b5cb1646a0295be28f5875c3cd9d9c09c67ea | 22,512 |
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 403)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 403)
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
return apology("invalid username and/or password", 403)
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/personal")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html") | 4ef618ea5028fca74664ef7cfdd8de9dae6de007 | 22,514 |
from twisted.internet import reactor, task

def twisted_sleep(time):
"""
Return a deferred that will be triggered after the specified amount of
time passes
"""
return task.deferLater(reactor, time, lambda: None) | f26cdbc7c8af8f19658241ae01465c418253f040 | 22,515 |
import pickle
import aiofiles
async def async_load_cache(
filename: str,
) -> dict[str, str | dict[str, dict[str, dict[str, dict[str, str]]]]]:
"""Load cache from file."""
async with aiofiles.open(filename, "rb") as file:
pickled_foo = await file.read()
return pickle.loads(pickled_foo) | 4b64e9f70d1dfd0627625edb69e80a166ebdeeb1 | 22,517 |
import numpy as np
import six
def make_function(function, name, arity):
"""Make a function node, a representation of a mathematical relationship.
This factory function creates a function node, one of the core nodes in any
program. The resulting object is able to be called with NumPy vectorized
arguments and return a resulting vector based on a mathematical
relationship.
Parameters
----------
function : callable
A function with signature `function(x1, *args)` that returns a Numpy
array of the same shape as its arguments.
name : str
The name for the function as it should be represented in the program
and its visualizations.
arity : int
The number of arguments that the `function` takes.
"""
if not isinstance(arity, int):
raise ValueError('arity must be an int, got %s' % type(arity))
if not isinstance(function, np.ufunc):
if six.get_function_code(function).co_argcount != arity:
raise ValueError('arity %d does not match required number of '
'function arguments of %d.'
% (arity,
six.get_function_code(function).co_argcount))
if not isinstance(name, six.string_types):
raise ValueError('name must be a string, got %s' % type(name))
# Check output shape
args = [np.ones(10) for _ in range(arity)]
try:
function(*args)
except ValueError:
raise ValueError('supplied function %s does not support arity of %d.'
% (name, arity))
if not hasattr(function(*args), 'shape'):
raise ValueError('supplied function %s does not return a numpy array.'
% name)
if function(*args).shape != (10,):
raise ValueError('supplied function %s does not return same shape as '
'input vectors.' % name)
# Check closure for zero & negative input arguments
args = [np.zeros(10) for _ in range(arity)]
if not np.all(np.isfinite(function(*args))):
raise ValueError('supplied function %s does not have closure against '
'zeros in argument vectors.' % name)
args = [-1 * np.ones(10) for _ in range(arity)]
if not np.all(np.isfinite(function(*args))):
raise ValueError('supplied function %s does not have closure against '
'negatives in argument vectors.' % name)
return _Function(function, name, arity) | 460d453888e025832983e7a822d3dfd498f0d176 | 22,518 |
def data_type_validator(type_name='data type'):
"""
Makes sure that the field refers to a valid data type, whether complex or primitive.
Used with the :func:`field_validator` decorator for the ``type`` fields in
:class:`PropertyDefinition`, :class:`AttributeDefinition`, :class:`ParameterDefinition`,
and :class:`EntrySchema`.
Extra behavior beyond validation: generated function returns true if field is a complex data
type.
"""
def validator(field, presentation, context):
field.default_validate(presentation, context)
value = getattr(presentation, field.name)
if value is not None:
# Test for circular definitions
container_data_type = get_container_data_type(presentation)
if (container_data_type is not None) and (container_data_type._name == value):
context.validation.report(
'type of property "%s" creates a circular value hierarchy: %s'
% (presentation._fullname, safe_repr(value)),
locator=presentation._get_child_locator('type'), level=Issue.BETWEEN_TYPES)
# Can be a complex data type
if get_type_by_full_or_shorthand_name(context, value, 'data_types') is not None:
return True
# Can be a primitive data type
if get_primitive_data_type(value) is None:
report_issue_for_unknown_type(context, presentation, type_name, field.name)
return False
return validator | d949eddfcfbe941e6ee74127761336fbc1e006db | 22,519 |
def list_challenge_topics(account_name, challenge_name): # noqa: E501
"""List stargazers
Lists the challenge topics. # noqa: E501
:param account_name: The name of the account that owns the challenge
:type account_name: str
:param challenge_name: The name of the challenge
:type challenge_name: str
:rtype: ArrayOfTopics
"""
try:
account = DbAccount.objects.get(login=account_name)
account_id = account.to_dict().get("id")
db_challenge = DbChallenge.objects.get(
ownerId=account_id, name=challenge_name
) # noqa: E501
res = ArrayOfTopics(topics=db_challenge.to_dict().get("topics"))
status = 200
except DoesNotExist:
status = 404
res = Error("The specified resource was not found", status)
except Exception as error:
status = 500
res = Error("Internal error", status, str(error))
return res, status | 24daefe48f62c649ee31f362c418eb62f0dd6c33 | 22,520 |
import copy
import numpy as np
from scipy.spatial.transform import Rotation as R
def ee_reg2(x_des, quat_des, sim, ee_index, kp=None, kv=None, ndof=12):
"""
same as ee_regulation, but now also accepting quat_des.
"""
kp = np.eye(len(sim.data.body_xpos[ee_index]))*10 if kp is None else kp
kv = np.eye(len(sim.data.body_xpos[ee_index]))*1 if kv is None else kv
jacp,jacr=jac(sim, ee_index, ndof)
# % compute position error terms as before
xdot = np.matmul(jacp, sim.data.qvel[:ndof])
error_vel = xdot
error_pos = x_des - sim.data.body_xpos[ee_index]
pos_term = np.matmul(kp,error_pos)
vel_term = np.matmul(kv,error_vel)
# % compute orientation error terms
current_ee_quat = copy.deepcopy(sim.data.body_xquat[ee_index])
current_ee_rotmat = R.from_quat([current_ee_quat[1],
current_ee_quat[2],
current_ee_quat[3],
current_ee_quat[0]])
target_ee_rotmat = R.from_quat([quat_des[1],
quat_des[2],
quat_des[3],
quat_des[0]])
ori_error = calculate_orientation_error(target_ee_rotmat.as_dcm(), current_ee_rotmat.as_dcm())
euler_dot = np.matmul(jacr, sim.data.qvel[:ndof])
ori_pos_term = np.matmul(kp, ori_error)
ori_vel_term = np.matmul(kv, euler_dot)
# % commanding ee pose only
F_pos = pos_term - vel_term
F_ori = ori_pos_term - ori_vel_term
J_full = np.concatenate([jacp, jacr])
F_full = np.concatenate([F_pos, F_ori])
torques = np.matmul(J_full.T, F_full) + sim.data.qfrc_bias[:ndof]
return torques | 23a0f818c57cf0760eff4f74ec7b94bd337e14ab | 22,521 |
def _default_clipping(
inner_factory: factory.AggregationFactory) -> factory.AggregationFactory:
"""The default adaptive clipping wrapper."""
# Adapts relatively quickly to a moderately high norm.
clipping_norm = quantile_estimation.PrivateQuantileEstimationProcess.no_noise(
initial_estimate=1.0, target_quantile=0.8, learning_rate=0.2)
return robust.clipping_factory(clipping_norm, inner_factory) | c39c143bebe78bec0bcd7b8d9f3457a04ac7b5a4 | 22,522 |
import torch
import pandas as pd
from torch.autograd import Variable
def make_pred_multilabel(data_transforms, model, PATH_TO_IMAGES, epoch_loss, CHROMOSOME):
"""
Gives predictions for test fold and calculates AUCs using previously trained model
Args:
data_transforms: torchvision transforms to preprocess raw images; same as validation transforms
model: densenet-121 from torchvision previously fine tuned to training data
PATH_TO_IMAGES: path at which NIH images can be found
Returns:
pred_df: dataframe containing individual predictions and ground truth for each test image
auc_df: dataframe containing aggregate AUCs by train/test tuples
"""
# calc preds in batches of 32, can reduce if your GPU has less RAM
BATCH_SIZE = 32
# set model to eval mode; required for proper predictions given use of batchnorm
model.train(False)
# create dataloader
dataset = CXR.CXRDataset(
path_to_images=PATH_TO_IMAGES,
fold="test",
transform=data_transforms['val'])
dataloader = torch.utils.data.DataLoader(
dataset, BATCH_SIZE, shuffle=False, num_workers=0)
size = len(dataset)
# create empty dfs
pred_df = pd.DataFrame(columns=["Image Index"])
true_df = pd.DataFrame(columns=["Image Index"])
# iterate over dataloader
for i, data in enumerate(dataloader):
inputs, labels, _ = data
inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
true_labels = labels.cpu().data.numpy()
batch_size = true_labels.shape
outputs = model(inputs)
probs = outputs.cpu().data.numpy()
return BATCH_SIZE | 42fb9446df2e0a8cc5d408957db21622bd5bb96e | 22,523 |
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up a config entry for solarlog."""
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, "sensor")
)
return True | 2cb14b9a71b16409aa9030acafd8c677efe1e22a | 22,524 |
def SqueezeNet_v1(include_top=True,
input_tensor=None, input_shape=None,
classes=10):
"""Instantiates the SqueezeNet architecture.
"""
input_shape = _obtain_input_shape(input_shape,
default_size=32,
min_size=32,
data_format=K.image_data_format(),
require_flatten=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = Conv2D(96, (3, 3), padding='same', name='conv1')(img_input)
x = Activation('relu', name='relu_conv1')(x)
# x = MaxPooling2D(pool_size=(2, 2), name='pool1')(x)
x = fire_module(x, fire_id=2, squeeze=16, expand=64)
x = fire_module(x, fire_id=3, squeeze=16, expand=64)
x = fire_module(x, fire_id=4, squeeze=32, expand=128)
x = MaxPooling2D(pool_size=(2, 2), name='pool4')(x)
x = fire_module(x, fire_id=5, squeeze=32, expand=128)
x = fire_module(x, fire_id=6, squeeze=48, expand=192)
x = fire_module(x, fire_id=7, squeeze=48, expand=192)
x = fire_module(x, fire_id=8, squeeze=64, expand=256)
x = MaxPooling2D(pool_size=(2, 2), name='pool8')(x)
x = fire_module(x, fire_id=9, squeeze=64, expand=256)
x = BatchNormalization()(x)
# x = Dropout(0.5, name='drop9')(x)
# x = Convolution2D(1000, (1, 1), padding='valid', name='conv10')(x)
x = Activation('relu', name='relu_10')(x)
x = GlobalAveragePooling2D(name="avgpool10")(x)
x = Dense(classes, activation='softmax', name="softmax-10")(x)
# x = Activation('softmax', name='softmax')(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
model = Model(inputs, x, name='squeezenet')
return model | b31f613a63836e88bf04c0b38240ce256cf9b2ae | 22,525 |
def xcafdoc_ColorRefGUID(*args):
"""
* Return GUIDs for TreeNode representing specified types of colors
:param type:
:type type: XCAFDoc_ColorType
:rtype: Standard_GUID
"""
return _XCAFDoc.xcafdoc_ColorRefGUID(*args) | b5d300d656977402d95c0227462e8da6224a3eff | 22,526 |
import code
def green_on_yellow(string, *funcs, **additional):
"""Text color - green on background color - yellow. (see _combine())."""
return _combine(string, code.GREEN, *funcs, attributes=(code.BG_YELLOW,)) | 90b2ae25b1e58da8b3a7a1d3b76468cfade3887a | 22,527 |
def _register_models(format_str, cls, forward=True):
"""Registers reward models of type cls under key formatted by format_str."""
forwards = {"Forward": {"forward": forward}, "Backward": {"forward": not forward}}
control = {"WithCtrl": {}, "NoCtrl": {"ctrl_coef": 0.0}}
res = {}
for k1, cfg1 in forwards.items():
for k2, cfg2 in control.items():
fn = registry.build_loader_fn_require_space(cls, **cfg1, **cfg2)
key = format_str.format(k1 + k2)
reward_serialize.reward_registry.register(key=key, value=fn)
return res | 96c95d83841b381777ce817e401cc6c7e8a5dc1d | 22,528 |
def configure_pseudolabeler(pseudolabel: bool, pseudolabeler_builder, pseudolabeler_builder_args):
"""Pass in a class that can build a pseudolabeler (implementing __call__) or a builder function
that returns a pseudolabeling function.
"""
if pseudolabel:
return globals()[pseudolabeler_builder](*pseudolabeler_builder_args)
return None | 3e31869542a977cc4b72267b348f7e087ccb2aee | 22,529 |
def flip_dict(dict, unique_items=False, force_list_values=False):
"""Swap keys and values in a dictionary
Parameters
----------
dict: dictionary
dictionary object to flip
unique_items: bool
whether to assume that all items in dict are unique, potential speedup but repeated items will be lost
force_list_values: bool
whether to force all items in the result to be lists or to let unique items have unwrapped values. Doesn't apply if unique_items is true.
"""
if unique_items:
return {v: k for k, v in dict.items()}
elif force_list_values:
new_dict = {}
for k, v in dict.items():
if v not in new_dict:
new_dict[v] = []
new_dict[v].append(k)
return new_dict
else:
new_dict = {}
for k, v in dict.items():
if v in new_dict:
if isinstance(new_dict[v], list):
new_dict[v].append(k)
else:
new_dict[v] = [new_dict[v], k]
else:
new_dict[v] = k
return new_dict | c8344852bc76321f80b4228671707ef7b48e4f71 | 22,531 |
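A short usage sketch for `flip_dict` (hypothetical example data), showing how repeated values are collected:

```python
# Hypothetical usage of flip_dict with repeated values.
grades = {"alice": "A", "bob": "B", "carol": "A"}
print(flip_dict(grades))                               # {'A': ['alice', 'carol'], 'B': 'bob'}
print(flip_dict(grades, force_list_values=True))       # {'A': ['alice', 'carol'], 'B': ['bob']}
print(flip_dict({"x": 1, "y": 2}, unique_items=True))  # {1: 'x', 2: 'y'}
```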
import numpy as np
import torch as tn

def randn(N, R, var = 1.0, dtype = tn.float64, device = None):
"""
A torchtt.TT tensor of shape N = [N1 x ... x Nd] and rank R is returned.
The entries of the full tensor are almost normally distributed with variance var.
Args:
N (list[int]): the shape.
R (list[int]): the rank.
var (float, optional): the variance. Defaults to 1.0.
dtype (torch.dtype, optional): the dtype of the returned tensor. Defaults to tn.float64.
device (torch.device, optional): the device where the TT cores are created (None means CPU). Defaults to None.
Returns:
torchtt.TT: the result.
"""
d = len(N)
v1 = var / np.prod(R)
v = v1**(1/d)
cores = [None] * d
for i in range(d):
cores[i] = tn.randn([R[i],N[i][0],N[i][1],R[i+1]] if isinstance(N[i],tuple) else [R[i],N[i],R[i+1]], dtype = dtype, device = device)*np.sqrt(v)
return TT(cores) | a88dc6a6602adf16617086d35ae43ed6f1eff796 | 22,532 |
import numpy as np

def flatten_all_dimensions_but_first(a):
"""
Flattens all dimensions but the first of a multidimensional array.
Parameters
----------
a : ndarray
Array to be flattened.
Returns
-------
b : ndarray
Result of flattening, two-dimensional.
"""
s = a.shape
s_flattened = (s[0], np.prod(s[1:]))
return a.reshape(*s_flattened) | 80c150e81cd03f6195234da2419ee78c6bee1e54 | 22,533 |
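A minimal sketch of calling `flatten_all_dimensions_but_first` on a small array (hypothetical shapes):

```python
import numpy as np

# Hypothetical usage: a (2, 3, 4) array flattens to (2, 12).
a = np.arange(24).reshape(2, 3, 4)
b = flatten_all_dimensions_but_first(a)
print(b.shape)  # (2, 12)
```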
def getHRLanguages(fname, hrthreshold=0):
"""
:param fname: the name of the file containing filesizes. Created using wc -l in the wikidata folder
:param hrthreshold: how big a set of transliteration pairs needs to be considered high resource
:return: a list of language names (in ISO 639-3 format?)
"""
hrlangs = set()
with open(fname) as fs:
for line in fs:
long,iso639_3,iso639_1,size = line.strip().split()
if int(size) > hrthreshold:
hrlangs.add(iso639_3)
return hrlangs | 184f91f40aba76c6ebdcd553c0054b4b1a73da5d | 22,534 |
def _wrap(func, *args, **kwargs):
"""To do."""
def _convert(func_, obj):
try:
return func_(obj)
except BaseException:
return obj
# First, decode each arguments
args_ = [_convert(decode, x) for x in args]
kwargs_ = {k: _convert(decode, v) for k, v in kwargs.items()}
# Execute the function
returned = func(*args_, **kwargs_)
if isinstance(returned, OpenMaya.MSelectionList):
returned = returned.getSelectionStrings()
# Finally encode the returned object(s)
if isinstance(returned, _STRING_TYPES):
return _convert(encode, returned)
if isinstance(returned, (list, tuple, set)):
return type(returned)(_convert(encode, x) for x in returned)
return returned | d3b951c664a098f6ce3d0c024cb2ae92b2fa9314 | 22,535 |
import itertools
import networkx as nx
def make_id_graph(xml):
"""
Make an undirected graph with CPHD identifiers as nodes and edges from correspondence and hierarchy.
Nodes are named as {xml_path}<{id}, e.g. /Data/Channel/Identifier<Ch1
There is a single "Data" node formed from the Data branch root that signifies data that can be read from the file
Args
----
xml: `lxml.etree.ElementTree.Element`
Root CPHD XML node
Returns
-------
id_graph: `networkx.Graph`
Undirected graph
* nodes: Data node, CPHD identifiers
* edges: Parent identifiers to child identifiers; corresponding identifiers across XML branches
"""
id_graph = nx.Graph()
def add_id_nodes_from_path(xml_path):
id_graph.add_nodes_from(["{}<{}".format(xml_path, n.text) for n in xml.findall('.' + xml_path)])
def add_id_nodes_from_path_with_connected_root(xml_path):
root_node = xml_path.split('/')[1]
id_graph.add_edges_from(zip(itertools.repeat(root_node),
["{}<{}".format(xml_path, n.text) for n in xml.findall('.' + xml_path)]))
def get_id_from_node_name(node_name):
return node_name.split('<')[-1]
def connect_matching_id_nodes(path_a, path_b):
all_nodes = list(id_graph.nodes)
all_a = {get_id_from_node_name(x): x for x in all_nodes if x.split('<')[0] == path_a}
all_b = {get_id_from_node_name(x): x for x in all_nodes if x.split('<')[0] == path_b}
for k in set(all_a).intersection(all_b):
id_graph.add_edge(all_a[k], all_b[k])
def add_and_connect_id_nodes(path_a, path_b):
add_id_nodes_from_path(path_a)
add_id_nodes_from_path(path_b)
connect_matching_id_nodes(path_a, path_b)
def add_and_connect_children(parent_path, parent_id_name, children_paths):
for parent in xml.findall('.' + parent_path):
parent_id = parent.findtext(parent_id_name)
for child_path in children_paths:
for child in parent.findall('.' + child_path):
id_graph.add_edge('{}/{}<{}'.format(parent_path, parent_id_name, parent_id),
'{}/{}<{}'.format(parent_path, child_path, child.text))
add_id_nodes_from_path_with_connected_root('/Data/Channel/Identifier')
add_id_nodes_from_path_with_connected_root('/Data/SupportArray/Identifier')
channel_children = ['DwellTimes/CODId', 'DwellTimes/DwellId']
channel_children += ['Antenna/'+ident for ident in ('TxAPCId', 'TxAPATId', 'RcvAPCId', 'RcvAPATId')]
channel_children += ['TxRcv/TxWFId', 'TxRcv/RcvId']
add_and_connect_children('/Channel/Parameters', 'Identifier', channel_children)
connect_matching_id_nodes('/Data/Channel/Identifier', '/Channel/Parameters/Identifier')
add_and_connect_id_nodes('/Data/SupportArray/Identifier', '/SupportArray/IAZArray/Identifier')
add_and_connect_id_nodes('/Data/SupportArray/Identifier', '/SupportArray/AntGainPhase/Identifier')
add_and_connect_id_nodes('/Data/SupportArray/Identifier', '/SupportArray/AddedSupportArray/Identifier')
add_and_connect_id_nodes('/Channel/Parameters/DwellTimes/CODId', '/Dwell/CODTime/Identifier')
add_and_connect_id_nodes('/Channel/Parameters/DwellTimes/DwellId', '/Dwell/DwellTime/Identifier')
add_and_connect_id_nodes('/Antenna/AntCoordFrame/Identifier', '/Antenna/AntPhaseCenter/ACFId')
add_and_connect_children('/Antenna/AntPattern', 'Identifier',
('GainPhaseArray/ArrayId', 'GainPhaseArray/ElementId'))
add_and_connect_children('/Antenna/AntPhaseCenter', 'Identifier', ('ACFId',))
add_and_connect_id_nodes('/Channel/Parameters/Antenna/TxAPCId', '/Antenna/AntPhaseCenter/Identifier')
add_and_connect_id_nodes('/Channel/Parameters/Antenna/TxAPATId', '/Antenna/AntPattern/Identifier')
add_and_connect_id_nodes('/Channel/Parameters/Antenna/RcvAPCId', '/Antenna/AntPhaseCenter/Identifier')
add_and_connect_id_nodes('/Channel/Parameters/Antenna/RcvAPATId', '/Antenna/AntPattern/Identifier')
connect_matching_id_nodes('/SupportArray/AntGainPhase/Identifier', '/Antenna/AntPattern/GainPhaseArray/ArrayId')
connect_matching_id_nodes('/SupportArray/AntGainPhase/Identifier', '/Antenna/AntPattern/GainPhaseArray/ElementId')
add_and_connect_id_nodes('/Channel/Parameters/TxRcv/TxWFId', '/TxRcv/TxWFParameters/Identifier')
add_and_connect_id_nodes('/Channel/Parameters/TxRcv/RcvId', '/TxRcv/RcvParameters/Identifier')
return id_graph | d83bf22f76393d1213b469ebd53d93dca30e9d90 | 22,536 |
import base64
from Crypto.Cipher import AES
def aes_base64_encrypt(data, key):
"""
@summary:
1. pkcs7padding
2. aes encrypt
3. base64 encrypt
@return:
string
"""
cipher = AES.new(key)
return base64.b64encode(cipher.encrypt(_pkcs7padding(data))) | 7f32b4a3848a4084ebd90c5a941c35e19d57d0ec | 22,537 |
def mast_query_darks(instrument, aperture, start_date, end_date):
"""Use ``astroquery`` to search MAST for dark current data
Parameters
----------
instrument : str
Instrument name (e.g. ``nircam``)
aperture : str
Detector aperture to search for (e.g. ``NRCA1_FULL``)
start_date : float
Starting date for the search in MJD
end_date : float
Ending date for the search in MJD
Returns
-------
query_results : list
List of dictionaries containing the query results
"""
# Make sure instrument is correct case
if instrument.lower() == 'nircam':
instrument = 'NIRCam'
dark_template = ['NRC_DARK']
elif instrument.lower() == 'niriss':
instrument = 'NIRISS'
dark_template = ['NIS_DARK']
elif instrument.lower() == 'nirspec':
instrument = 'NIRSpec'
dark_template = ['NRS_DARK']
elif instrument.lower() == 'fgs':
instrument = 'FGS'
dark_template = ['FGS_DARK']
elif instrument.lower() == 'miri':
instrument = 'MIRI'
dark_template = ['MIR_DARKALL', 'MIR_DARKIMG', 'MIR_DARKMRS']
# monitor_mast.instrument_inventory does not allow list inputs to
# the added_filters input (or at least if you do provide a list, then
# it becomes a nested list when it sends the query to MAST. The
# nested list is subsequently ignored by MAST.)
# So query once for each dark template, and combine outputs into a
# single list.
query_results = []
for template_name in dark_template:
# Create dictionary of parameters to add
parameters = {"date_obs_mjd": {"min": start_date, "max": end_date},
"apername": aperture, "exp_type": template_name}
query = monitor_mast.instrument_inventory(instrument, dataproduct=JWST_DATAPRODUCTS,
add_filters=parameters, return_data=True, caom=False)
if 'data' in query.keys():
if len(query['data']) > 0:
query_results.extend(query['data'])
return query_results | f612068ff220cf02cf6582478d257ff842f72eef | 22,540 |
import random
def randomNumGen(choice):
"""Get a random number to simulate a d6, d10, or d100 roll."""
if choice == 1: #d6 roll
die = random.randint(1, 6)
elif choice == 2: #d10 roll
die = random.randint(1, 10)
elif choice == 3: #d100 roll
die = random.randint(1, 100)
elif choice == 4: #d4 roll
die = random.randint(1, 4)
elif choice == 5: #d8 roll
die = random.randint(1, 8)
elif choice == 6: #d12 roll
die = random.randint(1, 12)
elif choice == 7: #d20 roll
die = random.randint(1, 20)
else: #simple error message
return "Shouldn't be here. Invalid choice"
return die | 307194d60a79ee2b101f7743002a380848e68628 | 22,541 |
def is_distinct(coll, key=EMPTY):
"""Checks if all elements in the collection are different."""
if key is EMPTY:
return len(coll) == len(set(coll))
else:
return len(coll) == len(set(xmap(key, coll))) | 94469c2915e5164238999f1d98c850856034652e | 22,543 |
from sklearn.model_selection import train_test_split

def split_data(df_data, config, test_frac=0.2):
"""
split df_data to train and test.
"""
df_train, df_test = train_test_split(df_data, test_size=test_frac)
df_train.reset_index(inplace=True, drop=True)
df_test.reset_index(inplace=True, drop=True)
df_train.to_csv(config.path_train_data, index=False)
df_test.to_csv(config.path_test_data, index=False)
return df_train | 6b9b9301d15e29562933164343d894880641aed8 | 22,544 |
import requests
def query(params, lang='en'):
"""
Simple Mediawiki API wrapper
"""
url = 'https://%s.wikipedia.org/w/api.php' % lang
finalparams = {
'action': 'query',
'format': 'json',
}
finalparams.update(params)
resp = requests.get(url, params=finalparams)
if not resp.ok:
return None
data = resp.json()
if 'query' in data:
return data['query'] | 990ca6aae015e3106920ce67eb4e29f39e8a8f4c | 22,545 |
from datetime import datetime, time, timedelta
def reporting_window(year, month):
"""
Returns the range of time when people are supposed to report
"""
last_of_last_month = datetime(year, month, 1) - timedelta(days=1)
last_bd_of_last_month = datetime.combine(
get_business_day_of_month(last_of_last_month.year, last_of_last_month.month, -1),
time()
)
last_bd_of_the_month = get_business_day_of_month(year, month, -1)
return last_bd_of_last_month, last_bd_of_the_month | 89f1c6f42257068c9483cc9870e0774fab262b13 | 22,546 |
import numpy as np
from sklearn.mixture import GaussianMixture

def fit_cluster_13():
"""Fit a GMM to resolve objects in cluster 13 into C, Q, O.
Returns
-------
sklearn.mixture.GaussianMixture
The mixture model trained on the latent scores.
list
The classes represented in order by the model components.
"""
data = classy.data.load()
X13 = data.loc[data.cluster == 13, ["z1", "z3"]]
gmm = GaussianMixture(n_components=3, random_state=17).fit(X13)
# Determine which component captures which class
CLASSES = ["", "", ""]
for ind, class_ in zip(np.argsort(gmm.means_[:, 0]), ["C", "Q", "O"]):
CLASSES[ind] = class_
return gmm, CLASSES | 5e242716633a759b2dcdbcbd68cbd441c7c0281e | 22,548 |
def sidebar_left(request):
"""
Return the left sidebar values in context
"""
if request.user.is_authenticated():
moderation_obj = {
'is_visible': False,
'count_notifs': 0,
}
if request.user.is_staff:
moderation_obj['is_visible'] = True
moderation_obj['count_notifs'] = ModerationHelper.count_unmoderated(request.user)
return {
'sidebar_left': {
'moderation': moderation_obj,
},
}
return {} | 161a0bdc872f8dfff9e57156e58685cb600d2be4 | 22,549 |
import torch
def get_edge_lengths(vertices, edge_points):
"""
get edge squared length using edge_points from get_edge_points(mesh) or edge_vertex_indices(faces)
:params
vertices (N,3)
edge_points (E,4)
"""
N, D = vertices.shape
E = edge_points.shape[0]
# E,2,D (OK to do this kind of indexing on the first dimension)
edge_vertices = vertices[edge_points[:,:2]]
edges = (edge_vertices[:,0,:]-edge_vertices[:,1,:])
edges_sqrlen = torch.sum(edges * edges, dim=-1)
return edges_sqrlen | 396d7d669d96611fb65c20b99347ab8041ff3f5a | 22,550 |
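A minimal sketch exercising `get_edge_lengths` on a single triangle (hypothetical data; only the first two columns of `edge_points` are read by the function):

```python
import torch

# Hypothetical usage: squared edge lengths of a unit right triangle.
vertices = torch.tensor([[0.0, 0.0, 0.0],
                         [1.0, 0.0, 0.0],
                         [0.0, 1.0, 0.0]])
# Rows are (vertex_a, vertex_b, ...); the last two columns are placeholders here.
edge_points = torch.tensor([[0, 1, 0, 0],
                            [1, 2, 0, 0],
                            [2, 0, 0, 0]])
print(get_edge_lengths(vertices, edge_points))  # tensor([1., 2., 1.])
```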
import numpy as np

def compute_pca(nparray):
"""
:param nparray: nxd array, d is the dimension
:return: evs eigenvalues, axmat dxn array, each column is an eigenvector
author: weiwei
date: 20200701osaka
"""
ca = np.cov(nparray, y=None, rowvar=False, bias=True) # rowvar row=point, bias biased covariance
pcv, pcaxmat = np.linalg.eig(ca)
return pcv, pcaxmat | 0aa1d731c0d296cc66a9275e466e4ce3d57a8621 | 22,551 |
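A small sketch of `compute_pca` on synthetic points (hypothetical data); the largest eigenvalue should line up with the most stretched axis:

```python
import numpy as np

# Hypothetical usage: points stretched along x should yield a dominant x-aligned eigenvector.
rng = np.random.default_rng(0)
points = rng.normal(size=(200, 3)) * np.array([5.0, 1.0, 0.1])
eigenvalues, eigenvectors = compute_pca(points)
dominant_axis = eigenvectors[:, np.argmax(eigenvalues)]  # columns are eigenvectors
print(eigenvalues)
print(dominant_axis)  # close to (+/-1, 0, 0)
```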
def fac(num):
"""求阶乘"""
assert num >= 0
if num in (0, 1):
return 1
return num * fac(num - 1) | e043e03e1d528dd9ec5685c4483e70217c948a0b | 22,552 |
def entropy(logp, p):
"""Compute the entropy of `p` - probability density function approximation.
We need this in order to compute the entropy-bonus.
"""
H = -(logp * p).sum(dim=1).mean()
return H | dff7c89979e5a9cef65088fd9f8858bb66bf218f | 22,553 |
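A minimal sketch of the entropy bonus above (hypothetical batch of action distributions):

```python
import torch
import torch.nn.functional as F

# Hypothetical usage: mean entropy of 4 distributions over 6 actions (bounded by log(6)).
logits = torch.randn(4, 6)
p = F.softmax(logits, dim=1)
logp = F.log_softmax(logits, dim=1)
print(entropy(logp, p))
```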
def find(query):
"""Retrieve *exactly* matching tracks."""
args = _parse_query(query)
return mpctracks('find', args) | 656b2f7dfc4642cbe5294a888f5c4873e905140a | 22,554 |
import random
def permuteregulations(graph):
"""Randomly change which regulations are repressions, maintaining activation and repression counts and directions."""
edges = list(graph.edges)
copy = graph.copy()
repressions = 0
for edge in edges:
edge_data = copy.edges[edge]
if edge_data['repress']:
repressions += 1
edge_data['repress'] = False
for new_repression in random.sample(edges, repressions):
copy.edges[new_repression]['repress'] = True
return copy | 76a12e573a6d053442c86bc81bebf10683d55dfb | 22,555 |
def editor_command(command):
"""
Is this an external editor command?
:param command: string
"""
# It is possible to have `\e filename` or `SELECT * FROM \e`. So we check
# for both conditions.
return command.strip().endswith('\\e') or command.strip().startswith('\\e ') | 0e80547b3c118bf01bd7a69e2d93fe8f65851ecf | 22,556 |
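A few hypothetical inputs for `editor_command`, assuming the function above is in scope:

```python
# Hypothetical usage: detect external-editor commands.
print(editor_command(r"\e schema.sql"))           # True  (starts with \e )
print(editor_command("SELECT * FROM users \\e"))  # True  (ends with \e)
print(editor_command("SELECT 1;"))                # False
```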
import numpy as np

def blrObjFunction(initialWeights, *args):
"""
blrObjFunction computes 2-class Logistic Regression error function and
its gradient.
Input:
initialWeights: the weight vector (w_k) of size (D + 1) x 1
train_data: the data matrix of size N x D
labeli: the label vector (y_k) of size N x 1 where each entry can be either 0 or 1 representing the label of corresponding feature vector
Output:
error: the scalar value of error function of 2-class logistic regression
error_grad: the vector of size (D+1) x 1 representing the gradient of
error function
"""
train_data, labeli = args
n_data = train_data.shape[0]
n_features = train_data.shape[1]
error = 0
error_grad = np.zeros((n_features + 1, 1))
##################
# YOUR CODE HERE #
##################
# HINT: Do not forget to add the bias term to your input data
initw = initialWeights.reshape(n_features + 1, 1)
inputWithBias = np.hstack((np.ones((n_data,1)),train_data))
out = sigmoid(np.dot(inputWithBias,initw))
a = np.sum((labeli * np.log(out))+(1.0 - labeli)*np.log(1.0 - out))
error = a * (-1/n_data)
b = np.sum(((out-labeli)* inputWithBias),axis=0)
error_grad = b/n_data
return error, error_grad | 3192982a54163868deffa9dfcce2a6f828b67abd | 22,557 |
import datetime
def edit_battle(battle_id):
"""
Edit battle form.
:param battle_id:
:return:
"""
battle = Battle.query.get(battle_id) or abort(404)
if battle.clan != g.player.clan and g.player.name not in config.ADMINS:
abort(403)
all_players = Player.query.filter_by(clan=g.player.clan, locked=False).order_by('lower(name)').all()
sorted_players = sorted(all_players, reverse=True, key=lambda p: p.player_role_value())
date = battle.date
map_name = battle.map_name
province = battle.map_province
battle_commander = battle.battle_commander
enemy_clan = battle.enemy_clan
battle_groups = BattleGroup.query.filter_by(clan=g.player.clan).order_by('date').all()
battle_result = battle.outcome_repr()
battle_group_final = battle.battle_group_final
players = battle.get_players()
description = battle.description
replay = battle.replay.unpickle()
duration = battle.duration
if battle.battle_group:
battle_group_description = battle.battle_group.description
else:
battle_group_description = ''
if request.method == 'POST':
players = map(int, request.form.getlist('players'))
map_name = request.form.get('map_name', '')
province = request.form.get('province', '')
enemy_clan = request.form.get('enemy_clan', '')
battle_result = request.form.get('battle_result', '')
battle_commander = Player.query.get(int(request.form['battle_commander']))
description = request.form.get('description', '')
battle_group = int(request.form['battle_group'])
battle_group_title = request.form.get('battle_group_title', '')
battle_group_description = request.form.get('battle_group_description', '')
battle_group_final = request.form.get('battle_group_final', '') == 'on'
duration = request.form.get('duration', 15 * 60)
errors = False
date = None
try:
date = datetime.datetime.strptime(request.form.get('date', ''), '%d.%m.%Y %H:%M:%S')
except ValueError:
flash(u'Invalid date format', 'error')
errors = True
if not map_name:
flash(u'Please enter the name of the map', 'error')
errors = True
if not battle_commander:
flash(u'No battle commander selected', 'error')
errors = True
if not players:
flash(u'No players selected', 'error')
errors = True
if not enemy_clan:
flash(u'Please enter the enemy clan\'s tag', 'errors')
errors = True
if not battle_result:
flash(u'Please select the correct outcome of the battle', 'errors')
errors = True
bg = None
if battle_group == -1:
# new group
bg = BattleGroup(battle_group_title, battle_group_description, g.player.clan, date)
elif battle_group >= 0:
# existing group
bg = BattleGroup.query.get(battle_group) or abort(500)
if bg.get_final_battle() is not None and bg.get_final_battle() is not battle and battle_group_final:
flash(u'Selected battle group already contains a battle marked as final')
errors = True
if not errors:
battle.date = date
battle.clan = g.player.clan
battle.enemy_clan = enemy_clan
battle.victory = battle_result == 'victory'
battle.draw = battle_result == 'draw'
battle.map_name = map_name
battle.map_province = province
battle.battle_commander_id = battle_commander.id
battle.description = description
battle.duration = duration
if bg:
battle.battle_group_final = battle_group_final
battle.battle_group = bg
db_session.add(bg)
else:
battle.battle_group = None
for ba in battle.attendances:
if not ba.reserve:
db_session.delete(ba)
for player_id in players:
player = Player.query.get(player_id)
if not player:
abort(404)
ba = BattleAttendance(player, battle, reserve=False)
db_session.add(ba)
db_session.add(battle)
db_session.commit()
logger.info(g.player.name + " updated the battle " + str(battle.id))
return redirect(url_for('battles_list', clan=g.player.clan))
return render_template('battles/edit.html', date=date, map_name=map_name, province=province, battle=battle,
battle_groups=battle_groups, duration=duration, battle_group_description=battle_group_description,
battle_commander=battle_commander, enemy_clan=enemy_clan, battle_result=battle_result,
battle_group_final=battle_group_final, players=players, description=description,
replay=replay, replays=replays, all_players=all_players, sorted_players=sorted_players) | 839a134441af0429ce141218931faef1d53f9938 | 22,558 |
import numpy as np

def construct_epsilon_heli(epsilon_diag,
pitch,
divisions,
thickness,
handness="left"):
"""
construct the dielectric matrices of all layers
return a N*3*3 array where N is the number of layers
We define pitch to be the distance over which the rotation is 180 degrees, i.e. the apparent
period in the z direction
"""
if pitch == thickness:
angles = np.linspace(0, -np.pi, divisions, endpoint=False)
elif pitch > thickness:
angles = np.linspace(
0, -np.pi * thickness / pitch, divisions, endpoint=False)
else:
raise NameError('Need thickness to be smaller than pitch')
return np.array(
[rotZ(i).dot(epsilon_diag.dot(rotZ(-i))) for i in angles]) | 3be04a06524c6011180584f39dea7651d43b5b46 | 22,559 |
import numpy as np

def image_overlay(im_1, im_2, color=True, normalize=True):
"""Overlay two images with the same size.
Args:
im_1 (np.ndarray): image array
im_2 (np.ndarray): image array
color (bool): Whether convert intensity image to color image.
normalize (bool): If both color and normalize are True, will
normalize the intensity so that it has minimum 0 and maximum 1.
Returns:
np.ndarray: an overlay image of im_1*0.5 + im_2*0.5
"""
if color:
im_1 = intensity_to_rgb(np.squeeze(im_1), normalize=normalize)
im_2 = intensity_to_rgb(np.squeeze(im_2), normalize=normalize)
return im_1*0.5 + im_2*0.5 | 501a1465147e8b63c1a36c0cd7f2a1850f7a14b9 | 22,561 |
def get_next_seg(ea):
"""
Get next segment
@param ea: linear address
@return: start of the next segment
BADADDR - no next segment
"""
nextseg = ida_segment.get_next_seg(ea)
if not nextseg:
return BADADDR
else:
return nextseg.start_ea | 5ea0bf1ef889bad4013a86df237cca39a4934c4c | 22,563 |
from invenio_app_ils.items.api import ITEM_PID_TYPE
def validate_item_pid(item_pid):
"""Validate item or raise and return an obj to easily distinguish them."""
if item_pid["type"] not in [BORROWING_REQUEST_PID_TYPE, ITEM_PID_TYPE]:
raise UnknownItemPidTypeError(pid_type=item_pid["type"])
# inline object with properties
return type(
"obj",
(object,),
{
"is_item": item_pid["type"] == ITEM_PID_TYPE,
"is_brw_req": item_pid["type"] == BORROWING_REQUEST_PID_TYPE,
},
) | f1e5c59e43787a736cb99c51a74e562f6a1c636f | 22,564 |
import _ctypes
import pptx
def save_as_png(prs: pptx.presentation.Presentation, save_folder: str, overwrite: bool = False) -> bool:
"""
Save presentation as PDF.
Requires to save a temporary *.pptx first.
Needs module comtypes (windows only).
Needs installed PowerPoint.
Note: you have to give full path for save_folder, or PowerPoint might cause random exceptions.
"""
result = False
with TemporaryPPTXFile() as f:
prs.save(f.name)
try:
result = save_pptx_as_png(save_folder, f.name, overwrite)
except _ctypes.COMError as e:
print(e)
print("Couldn't save PNG file due to communication error with PowerPoint.")
result = False
return result | bf982eb1b5395e4602f00859c73a1924fca638b9 | 22,566 |
import json
import pyqrcode
def http_post(request):
"""HTTP Cloud Function.
Args:
request (flask.Request): The request object.
<https://flask.palletsprojects.com/en/1.1.x/api/#incoming-request-data>
Returns:
The response text, or any set of values that can be turned into a
Response object using `make_response`
<https://flask.palletsprojects.com/en/1.1.x/api/#flask.make_response>.
"""
# Init an empty json response
response_data = {}
request_json = request.get_json(silent=True)
request_args = request.args
if request_json and 'signed_message' in request_json:
# Grab input values
signed_message = request_json['signed_message']
elif request_args and 'signed_message' in request_args:
# Grab input values
signed_message = request_args['signed_message']
else:
response_data['status'] = 'Invalid request parameters'
return json.dumps(response_data)
# Load the QR Code Back up and Return
response_data['qr_code'] = pyqrcode.create(signed_message).png_as_base64_str(scale=2)
response_data['status'] = 'Message Created'
return json.dumps(response_data) | dd82b624a3d2cf37c1cb2538cdc8d26447f3e029 | 22,567 |
def create_incident_field_context(incident):
"""Parses the 'incident_fields' entry of the incident and returns it
Args:
incident (dict): The incident to parse
Returns:
list. The parsed incident fields list
"""
incident_field_values = dict()
for incident_field in incident.get('incident_field_values', []):
incident_field_values[incident_field['name'].replace(" ", "_")] = incident_field['value']
return incident_field_values | 1a56c5b76c4c82827f8b7febde30e2881e6f0561 | 22,569 |
def create_profile(body, user_id): # noqa: E501
"""Create a user profile
# noqa: E501
:param body:
:type body: dict | bytes
:param user_id: The id of the user to update
:type user_id: int
:rtype: None
"""
if connexion.request.is_json:
json = connexion.request.get_json()
json["user_id"] = user_id
profile = ProfileService().insert_profile(json)
return profile
return "Whoops..." | ff00d3a65f0e10ec3f90d0b1139033cf004d560a | 22,570 |
import pandas as pd

def load_global_recovered() -> pd.DataFrame:
"""Loads time series data for global COVID-19 recovered cases
Returns:
pd.DataFrame: A pandas dataframe with time series data for global COVID-19 recovered cases
"""
return load_csv(global_recovered_cases_location) | bb7702d3cd597dbc12314804d0d0a09f4c28d72c | 22,571 |
import urllib
def build_url(self, endpoint):
"""
Builds a URL given an endpoint
Args:
endpoint (Endpoint: str): The endpoint to build the URL for
Returns:
str: The URL to access the given API endpoint
"""
return urllib.parse.urljoin(self.base_url, endpoint) | e31bead2e87cea82c237df06bf00085dc8a3c04d | 22,572 |
def neighbors(i, diag = True,inc_self=False):
"""
determine the neighbors, returns a set with neighboring tuples {(0,1)}
if inc_self: returns self in results
if diag: return diagonal moves as well
"""
r = [1,0,-1]
c = [1,-1,0]
if diag:
if inc_self:
return {(i[0]+dr, i[1]+dc) for dr in r for dc in c}
else:
return {(i[0]+dr, i[1]+dc) for dr in r for dc in c if not (dr == 0 and dc == 0)}
else:
res = {(i[0],i[1]+1), (i[0],i[1]-1),(i[0]+1,i[1]),(i[0]-1,i[1])}
if inc_self: res.add(i)
return res | 3d4ca12795fa1d3d7e7f8f231cdf0f12257da7e0 | 22,573 |
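A quick sketch of `neighbors` on the origin cell (hypothetical usage):

```python
# Hypothetical usage: neighbours of (0, 0) with and without diagonals.
print(sorted(neighbors((0, 0), diag=False)))   # [(-1, 0), (0, -1), (0, 1), (1, 0)]
print(len(neighbors((0, 0))))                  # 8 (diagonals included, self excluded)
print(len(neighbors((0, 0), inc_self=True)))   # 9
```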
import torch
def gumbel_softmax(logits, tau=1, hard=False, eps=1e-10):
"""
NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3
Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
tau: non-negative scalar temperature
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size, n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
be a probability distribution that sums to 1 across classes
Constraints:
- this implementation only works on batch_size x num_features tensor for now
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,
(MIT license)
"""
y_soft = gumbel_softmax_sample(logits, tau=tau, eps=eps)
if hard:
shape = logits.size()
_, k = y_soft.data.max(-1)
# this bit is based on
# https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5
y_hard = torch.zeros(*shape)
if y_soft.is_cuda:
y_hard = y_hard.cuda()
y_hard = y_hard.zero_().scatter_(-1, k.view(shape[:-1] + (1,)), 1.0)
# this cool bit of code achieves two things:
# - makes the output value exactly one-hot (since we add then
# subtract y_soft value)
# - makes the gradient equal to y_soft gradient (since we strip
# all other gradients)
y = y_hard - y_soft.data + y_soft
else:
y = y_soft
return y | 3d512e47771ecac396e757e4b7b8db9030b89f46 | 22,575 |
from typing import List
import re
from warnings import warn
def decompose_f_string(f_string: str) -> (List[str], List[str]):
"""
Decompose an f-string into the list of variable names and the separators between them.
An f-string is any string that contains enclosed curly brackets around text.
A variable is defined as the text expression within the enclosed curly brackets.
The separators are the strings remnants that surround the variables.
An example f-string and components would be: 'This is {an} f-string!', with variable 'an' and separators
'This is ' and ' f-string!'.
    An instance of this example would be: 'This is definitely a good f-string!' with variable value 'definitely a good'.
Example
-------
variable_names, separators = decompose_f_string(f_string="a/{x}b{y}/c{z}")
# variable_names = ["x", "y", "z"]
# separators = ["a/", "b", "/c"", ""]
"""
matches = re.findall("{.*?}", f_string) # {.*?} optionally matches any characters enclosed by curly brackets
variable_names = [match.lstrip("{").rstrip("}") for match in matches]
assert not any(
(variable_name == "" for variable_name in variable_names)
), "Empty variable name detected in f-string! Please ensure there is text between all enclosing '{' and '}'."
pattern = "^.*?{|}.*?{|}.*?$"
    # Description: pattern matches all expressions outside of curly bracket enclosures
    # .*?{ optionally matches any characters before a curly bracket opening
# | logical 'or'
# }.*?{ between a curly bracket closure and opening
# |
# }.*? after a closure
separators = [x.rstrip("{").lstrip("}") for x in re.findall(pattern=pattern, string=f_string)]
if any((separator == "" for separator in separators[1:-1])):
warn(
"There is an empty separator between two variables in the f-string! "
"The f-string will not be uniquely invertible."
)
return variable_names, separators | c463c8189539fd0c2c14e2c5620cafc9820c0f41 | 22,576 |
def process(register, instructions):
"""Process instructions on copy of register."""
cur_register = register.copy()
cur_index = 0
while cur_index < len(instructions):
cur_instruction = instructions[cur_index]
cur_index += process_instruction(cur_register, cur_instruction)
return cur_register | 5a204828261d8408467d9b17976728780db76d1d | 22,577 |
from math import sin, cos, atan2
def bearing_radians(lat1, lon1, lat2, lon2):
"""Initial bearing"""
dlon = lon2 - lon1
y = sin(dlon) * cos(lat2)
x = cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(dlon)
return atan2(y, x) | 613a5496b58e09a1b79c0576e90ff2b6f49df31d | 22,578 |
import logging
import json
def RunSimulatedStreaming(vm):
"""Spawn fio to simulate streaming and gather the results.
Args:
vm: The vm that synthetic_storage_workloads_benchmark will be run upon.
Returns:
A list of sample.Sample objects
"""
test_size = min(vm.total_memory_kb / 10, 1000000)
iodepth_list = FLAGS.iodepth_list or DEFAULT_STREAMING_SIMULATION_IODEPTH_LIST
results = []
for depth in iodepth_list:
cmd = (
'--filesize=10g '
'--directory=%s '
'--ioengine=libaio '
'--overwrite=0 '
'--invalidate=1 '
'--direct=1 '
'--randrepeat=0 '
'--iodepth=%s '
'--blocksize=1m '
'--size=%dk '
'--filename=fio_test_file ') % (vm.GetScratchDir(),
depth,
test_size)
if FLAGS.maxjobs:
cmd += '--max-jobs=%s ' % FLAGS.maxjobs
cmd += (
'--name=sequential_write '
'--rw=write '
'--end_fsync=1 '
'--name=sequential_read '
'--stonewall '
'--rw=read ')
logging.info('FIO Results for simulated %s', STREAMING)
res, _ = vm.RemoteCommand('%s %s' % (fio.FIO_CMD_PREFIX, cmd),
should_log=True)
results.extend(
fio.ParseResults(fio.FioParametersToJob(cmd), json.loads(res)))
UpdateWorkloadMetadata(results)
return results | 417898b96223eb28d1d999adaad137c2e9d9e30c | 22,579 |
def get_all_tutorial_info():
"""
Tutorial route to get tutorials with steps
Parameters
----------
None
Returns
-------
Tutorials with steps
"""
sql_query = "SELECT * FROM diyup.tutorials"
cur = mysql.connection.cursor()
cur.execute(sql_query)
tutorials = cur.fetchall()
output = []
for tutorial in tutorials:
tutorial_data = {}
tutorial_data['uuid'] = tutorial[0]
tutorial_data['author_username'] = tutorial[1]
tutorial_data['title'] = tutorial[2]
tutorial_data['image'] = tutorial[3]
tutorial_data['category'] = tutorial[4]
tutorial_data['description'] = tutorial[5]
tutorial_data['author_difficulty'] = str(tutorial[6])
tutorial_data['viewer_difficulty'] = \
str(average_rating_type_for_tutorial('difficulty', tutorial[0]))
tutorial_data['rating'] = \
str(average_rating_type_for_tutorial('score', tutorial[0]))
sql_query = "SELECT * FROM diyup.steps WHERE tutorial_uuid=%s"
cur.execute(sql_query, (tutorial[0],))
steps = cur.fetchall()
output_steps = []
for step in steps:
step_data = {}
step_data['index'] = step[1]
step_data['content'] = step[2]
step_data['image'] = step[3]
output_steps.append(step_data)
tutorial_data['steps'] = output_steps
output.append(tutorial_data)
cur.close()
return jsonify({'tutorials' : output}), 200 | 2565427a617ce042af9165963f7676877c97dd16 | 22,581 |
from datetime import datetime
def parse_date(datestring, default_timezone=UTC):
"""Parses ISO 8601 dates into datetime objects
The timezone is parsed from the date string. However it is quite common to
have dates without a timezone (not strictly correct). In this case the
default timezone specified in default_timezone is used. This is UTC by
default.
"""
    if not isinstance(datestring, str):
raise ParseError("Expecting a string %r" % datestring)
m = ISO8601_REGEX.match(datestring)
if not m:
raise ParseError("Unable to parse date string %r" % datestring)
groups = m.groupdict()
tz = parse_timezone(groups["timezone"], default_timezone=default_timezone)
if groups["fraction"] is None:
groups["fraction"] = 0
else:
groups["fraction"] = int(float("0.%s" % groups["fraction"]) * 1e6)
return datetime(int(groups["year"]), int(groups["month"]), int(groups["day"]),
int(groups["hour"]), int(groups["minute"]), int(groups["second"]),
int(groups["fraction"]), tz) | 41058b1a825a9c0ee133327001ada1834c3c1732 | 22,582 |
def BigSpectrum_to_H2COdict(sp, vrange=None):
"""
A rather complicated way to make the spdicts above given a spectrum...
"""
spdict = {}
    for linename,freq in pyspeckit.spectrum.models.formaldehyde.central_freq_dict.items():
if vrange is not None:
freq_test_low = freq - freq * vrange[0]/pyspeckit.units.speedoflight_kms
freq_test_high = freq - freq * vrange[1]/pyspeckit.units.speedoflight_kms
else:
freq_test_low = freq_test_high = freq
if (sp.xarr.as_unit('Hz').in_range(freq_test_low) or
sp.xarr.as_unit('Hz').in_range(freq_test_high)):
spdict[linename] = sp.copy()
spdict[linename].xarr.convert_to_unit('GHz')
spdict[linename].xarr.refX = freq
spdict[linename].xarr.refX_units = 'Hz'
#spdict[linename].baseline = copy.copy(sp.baseline)
#spdict[linename].baseline.Spectrum = spdict[linename]
spdict[linename].specfit = sp.specfit.copy(parent=spdict[linename])
spdict[linename].xarr.convert_to_unit('km/s')
if vrange is not None:
try:
spdict[linename].crop(*vrange, units='km/s')
except IndexError:
# if the freq in range, but there's no data in range, remove
spdict.pop(linename)
return spdict | 961e4dd676332efea084fd87d9108337ce56fbe2 | 22,583 |
def get_thickness_model(model):
"""
Return a function calculating an adsorbate thickness.
The ``model`` parameter is a string which names the thickness equation which
should be used. Alternatively, a user can implement their own thickness model, either
as an experimental isotherm or a function which describes the adsorbed layer. In that
case, instead of a string, pass the Isotherm object or the callable function as the
``model`` parameter.
Parameters
----------
model : str or callable
Name of the thickness model to use.
Returns
-------
callable
A callable that takes a pressure in and returns a thickness
at that point.
Raises
------
ParameterError
When string is not in the dictionary of models.
"""
# If the model is a string, get a model from the _THICKNESS_MODELS
if isinstance(model, str):
if model not in _THICKNESS_MODELS:
            raise ParameterError(
                f"Model {model} is not an implemented thickness function. "
                f"Available models are {_THICKNESS_MODELS.keys()}"
            )
return _THICKNESS_MODELS[model]
# If the model is an callable, return it instead
else:
return model | 1573206c331cbb4f770ed21cea88f73d13fea385 | 22,584 |
import aiohttp
def http(session: aiohttp.ClientSession) -> Handler:
"""`aiohttp` based request handler.
:param session:
"""
async def handler(request: Request) -> Response:
async with session.request(
request.method,
request.url,
params=request.params or None,
data=request.form_data or None,
json=request.data or None,
headers=request.headers or None,
) as response:
return Response(
status=response.status,
reason=response.reason,
headers=response.headers,
data=await response.json(encoding='utf-8'),
)
return handler | 2628774af37c44a42c74ab8844b2f5d37200eaa9 | 22,585 |
def remove_package_repo_and_wait(repo_name, wait_for_package):
""" Remove a repository from the list of package sources, then wait for the removal to complete
:param repo_name: name of the repository to remove
:type repo_name: str
:param wait_for_package: the package whose version should change after the repo is removed
:type wait_for_package: str
:returns: True if successful, False otherwise
:rtype: bool
"""
return remove_package_repo(repo_name, wait_for_package) | 14b8d261c58ba07d12fd9737392858a541b8deb1 | 22,586 |
from typing import Callable
from typing import List
def lyndon_of_word(word : str, comp: Callable[[List[str]],str] = min ) -> str:
"""
Returns the Lyndon representative among set of circular shifts,
    that is the minimum for the lexicographic order 'L'<'R'
:code:`lyndon_of_word('RLR')`.
Args:
`word` (str): a word (supposedly binary L&R)
        `comp` ( Callable[[List[str]], str] ): comparison function, min or max
Returns:
str: list of circular shifts
:Example:
>>> lyndon_of_word('LRRLRLL')
'LLLRRLR'
"""
if word == '':
return ''
return comp(list_of_circular_shifts(word)) | c4195244488de555871e02260c733a28a882481a | 22,587 |
def num_of_visited_nodes(driver_matrix):
""" Calculate the total number of visited nodes for multiple paths.
Args:
driver_matrix (list of lists): A list whose members are lists that
contain paths that are represented by consecutively visited nodes.
Returns:
int: Number of visited nodes
"""
return sum(len(x) for x in driver_matrix) | 2a1244cd033029cec4e4f7322b9a27d01ba4abd5 | 22,588 |
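A short usage sketch for num_of_visited_nodes above (hypothetical paths; assumes the function is in scope):
# Two hypothetical driver paths with 3 and 2 visited nodes respectively.
paths = [[1, 2, 3], [4, 5]]
print(num_of_visited_nodes(paths))  # 5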
def gen_custom_item_windows_file(description, info, value_type, value_data,
regex, expect):
"""Generates a custom item stanza for windows file contents audit
Args:
description: string, a description of the audit
info: string, info about the audit
value_type: string, "POLICY_TEXT" -- included for parity with other
gen_* modules.
value_data: string, location of remote file to check
regex: string, regular expression to check file for
expect: string, regular expression to match for a pass
Returns:
A list of strings to put in the main body of a Windows file audit file.
"""
out = []
out.append('')
out.append('<custom_item>')
out.append(' type: FILE_CONTENT_CHECK')
out.append(' description: "%s"' % description.replace("\n", " "))
out.append(' info: "%s"' % info.replace("\n", " "))
out.append(' value_type: %s' % value_type)
out.append(' value_data: "%s"' % value_data)
out.append(' regex: "%s"' % regex)
out.append(' expect: "%s"' % expect)
out.append('</custom_item>')
out.append(' ')
return out | 3d0335d91eb700d30d5ae314fce13fc4a687d766 | 22,589 |
import inspect
def create_signature(args=None, kwargs=None):
"""Create a inspect.Signature object based on args and kwargs.
Args:
args (list or None): The names of positional or keyword arguments.
kwargs (list or None): The keyword only arguments.
Returns:
inspect.Signature
"""
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
parameter_objects = []
for arg in args:
param = inspect.Parameter(
name=arg,
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
)
parameter_objects.append(param)
for arg in kwargs:
param = inspect.Parameter(
name=arg,
kind=inspect.Parameter.KEYWORD_ONLY,
)
parameter_objects.append(param)
sig = inspect.Signature(parameters=parameter_objects)
return sig | 011acccada7896e11e2d9bb73dcf03d7dc6e751e | 22,590 |
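A brief usage sketch for create_signature above (hypothetical argument names; assumes the function is in scope):
# Builds a signature with two positional-or-keyword parameters and one keyword-only parameter.
sig = create_signature(args=["a", "b"], kwargs=["c"])
print(sig)  # (a, b, *, c)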
import json
def select(type, name, optional):
"""Select data from data.json file"""
with open('data.json', 'r') as f:
data = json.load(f)
for i in data[type]:
if i == data[name]:
return data[optional] | f784137127cd77af2db6e4ac653dc360515ec056 | 22,591 |
def perform_step(polymer: str, rules: dict) -> str:
"""
Performs a single step of polymerization by performing all applicable insertions; returns new polymer template string
"""
new = [polymer[i] + rules[polymer[i:i+2]] for i in range(len(polymer)-1)]
new.append(polymer[-1])
return "".join(new) | c60f760ef6638ff3a221aff4a56dccbeae394709 | 22,592 |
import json
def load_datasets(json_file):
"""load dataset described in JSON file"""
datasets = {}
with open(json_file, 'r') as fd:
config = json.load(fd)
all_set_path = config["Path"]
for name, value in config["Dataset"].items():
assert isinstance(value, dict)
datasets[name] = Dataset()
for i in value:
if not i in ('train', 'val', 'test'):
continue
sets = []
for j in to_list(value[i]):
try:
sets += list(_glob_absolute_pattern(all_set_path[j]))
except KeyError:
sets += list(_glob_absolute_pattern(j))
datasets[name].__setitem__(i, sets)
if 'param' in value:
for k, v in value['param'].items():
datasets[name].__setitem__(k, v)
return datasets | d34d3e79582db9f0682909a88d697edbf0ef75e3 | 22,593 |
def instantiate_descriptor(**field_data):
"""
Instantiate descriptor with most properties.
"""
system = get_test_descriptor_system()
course_key = CourseLocator('org', 'course', 'run')
usage_key = course_key.make_usage_key('html', 'SampleHtml')
return system.construct_xblock_from_class(
HtmlBlock,
scope_ids=ScopeIds(None, None, usage_key, usage_key),
field_data=DictFieldData(field_data),
) | 6a640d1d66818898951298750a819d12e24c74e9 | 22,594 |
import time
import numpy as np
def simple_switch(M_in, P_in, slack=1, animate=True, cont=False, gen_pos=None, verbose=True):
    """
    A simple switch algorithm. When encountering a change in sequence, compare the value
    of the switch to the value of the current state, and switch if it is larger. The default
    value function is sum(exp(length(adjoint sequences))), where length is measured in the input arrays.
"""
start_time = time.time()
M, P = np.copy(M_in), np.copy(P_in)
M_track, P_track = np.zeros_like(M), np.ones_like(P)
value_function = exp_len_value if not cont else continuity_value
if animate:
history = np.array([M,P])
for w in range(slack+1):
M, P = blurr_slack(M,w), blurr_slack(P,w) # if slack w, then sequences of length w don't make any sense
if animate:
history = np.dstack([history, [M,P]])
for i in range(1,len(M)-w):
if M[i] != M[i-1] or P[i] != P[i-1]:
val = value_function(M,P,i-1,i,gen_pos)
M_temp = np.concatenate([M[:i], [P[i+w]]*w, P[i+w:]])
P_temp = np.concatenate([P[:i], [M[i+w]]*w, M[i+w:]])
switch_val = value_function(M_temp,P_temp,i-1,i,gen_pos)
if switch_val > val and not is_steeling(M,P,i,w):
# print(i)
M, P = np.copy(M_temp), np.copy(P_temp)
M_track, P_track = track_switch(M_track, P_track, i)
if animate:
history = np.dstack([history, [M,P]])
ani = None
if animate:
# make it stop on the end for a while
for _ in range(20):
history = np.dstack([history, [M,P]])
ani = animate_history(history)
if verbose:
print("Solving time:", time.time()-start_time, "seconds")
return M,P,M_track,P_track,ani | 709f3eeab1fe498cb0a5b9a765c44d427a03b4c4 | 22,595 |
def drop_duplicates_by_type_or_node(n_df, n1, n2, typ):
"""
Drop the duplicates in the network, by type or by node.
For each set of "duplicate" edges, only the edge with the maximum weight
will be kept.
By type, the duplicates are where nd1, nd2, and typ are identical; by node,
the duplicates are where nd1, and nd2 are identical.
Parameters:
n_df (list): the data
        n1 (int): the column for the first node
n2 (int): the column for the second node
typ (int): the column for the type
Returns:
list: the modified data
"""
# If n_df is sorted, this method will work, iterating through the
# rows and only keeping the first row of a group of duplicate rows
prev_nd1_val = None
prev_nd2_val = None
prev_type_val = None
new_n_df = []
for row in n_df:
nd1_val = row[n1]
nd2_val = row[n2]
type_val = row[typ]
nodes_differ = nd1_val != prev_nd1_val or nd2_val != prev_nd2_val
type_differs = type_val != prev_type_val
if (DROP_DUPLICATES_METHOD == 'node' and nodes_differ) or (nodes_differ or type_differs):
new_n_df.append(row)
prev_nd1_val = nd1_val
prev_nd2_val = nd2_val
prev_type_val = type_val
return new_n_df | 015679f5a2625792ef57b49994408746440ce15c | 22,596 |
import SimpleITK as sitk
def voting(labels):
""" Majority voting. """
return sitk.LabelVoting(labels, 0) | 52fa5c2cfbe3551a676904ea1c2f3c6514833ba7 | 22,597 |
def user_city_country(obj):
"""Get the location (city, country) of the user
Args:
obj (object): The user profile
Returns:
str: The city and country of user (if exist)
"""
location = list()
if obj.city:
location.append(obj.city)
if obj.country:
location.append(obj.country)
if len(location):
return ", ".join(str(i) for i in location)
return 'Not available' | be4238246042371215debb608934b89b63a07dab | 22,598 |
def test_encrypted_parquet_write_kms_error(tempdir, data_table,
basic_encryption_config):
"""Write an encrypted parquet, but raise KeyError in KmsClient."""
path = tempdir / 'encrypted_table_kms_error.in_mem.parquet'
encryption_config = basic_encryption_config
# Empty master_keys_map
kms_connection_config = pe.KmsConnectionConfig()
def kms_factory(kms_connection_configuration):
# Empty master keys map will cause KeyError to be raised
# on wrap/unwrap calls
return InMemoryKmsClient(kms_connection_configuration)
crypto_factory = pe.CryptoFactory(kms_factory)
with pytest.raises(KeyError, match="footer_key"):
# Write with encryption properties
write_encrypted_parquet(path, data_table, encryption_config,
kms_connection_config, crypto_factory) | aeeffecf5ca38907506ce79b96c823652cd3ef99 | 22,599 |
import numcodecs as codecs  # Blosc lives in numcodecs, not the stdlib codecs module
async def putStorBytes(app, key, data, filter_ops=None, bucket=None):
""" Store byte string as S3 object with given key
"""
client = _getStorageClient(app)
if not bucket:
bucket = app['bucket_name']
if key[0] == '/':
key = key[1:] # no leading slash
shuffle = -1 # auto-shuffle
clevel = 5
cname = None # compressor name
if filter_ops:
if "compressor" in filter_ops:
cname = filter_ops["compressor"]
if "use_shuffle" in filter_ops and not filter_ops['use_shuffle']:
shuffle = 0 # client indicates to turn off shuffling
if "level" in filter_ops:
clevel = filter_ops["level"]
msg = f"putStorBytes({bucket}/{key}), {len(data)} bytes shuffle: {shuffle}"
msg += f" compressor: {cname} level: {clevel}"
log.info(msg)
if cname:
try:
blosc = codecs.Blosc(cname=cname, clevel=clevel, shuffle=shuffle)
cdata = blosc.encode(data)
# TBD: add cname in blosc constructor
msg = f"compressed from {len(data)} bytes to {len(cdata)} bytes "
msg += f"using filter: {blosc.cname} with level: {blosc.clevel}"
log.info(msg)
data = cdata
except Exception as e:
log.error(f"got exception using blosc encoding: {e}")
raise HTTPInternalServerError()
rsp = await client.put_object(key, data, bucket=bucket)
return rsp | f58ff0c9073e2ce7dce19b2c586abc14af792590 | 22,600 |
import numpy as np
def unique_boxes(boxes, scale=1.0):
    """Return indices of unique boxes."""
    assert boxes.shape[1] == 4, 'Func does not support tubes yet'
v = np.array([1, 1e3, 1e6, 1e9])
hashes = np.round(boxes * scale).dot(v)
_, index = np.unique(hashes, return_index=True)
return np.sort(index) | 951f0b6f0d51212ad63e787a32c78d14f7e11bd1 | 22,602 |
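A small usage sketch for unique_boxes above (hypothetical boxes; assumes the function is in scope):
import numpy as np
# The second box is an exact duplicate of the first, so only indices 0 and 2 are kept.
boxes = np.array([[0, 0, 10, 10],
                  [0, 0, 10, 10],
                  [5, 5, 15, 15]], dtype=float)
print(unique_boxes(boxes))  # [0 2]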
import tensorflow as tf
def dataloader(loader, mode):
"""Sets batchsize and repeat for the train, valid, and test iterators.
Args:
loader: tfds.load instance, a train, valid, or test iterator.
mode: string, set to 'train' for use during training;
set to anything else for use during validation/test
Returns:
An iterator for features and labels tensors.
"""
loader = loader.map(process_images)
repeat = 1
if mode == 'train':
repeat = None
loader = loader.shuffle(1000 * FLAGS.batch_size)
return loader.batch(
FLAGS.batch_size).repeat(repeat).prefetch(tf.data.experimental.AUTOTUNE) | b15b736919c21df142e2d4815f33b24dc0f01e5f | 22,603 |
def sub_inplace(X, varX, Y, varY):
"""In-place subtraction with error propagation"""
# Z = X - Y
# varZ = varX + varY
X -= Y
varX += varY
return X, varX | 646578886c37003eb860134b93db95e6b4d73ed7 | 22,604 |
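A short usage sketch for sub_inplace above (hypothetical measurements; assumes the function is in scope):
import numpy as np
# X and varX are modified in place; variances add under subtraction.
X, varX = np.array([5.0, 7.0]), np.array([1.0, 1.0])
Y, varY = np.array([2.0, 3.0]), np.array([0.5, 0.5])
sub_inplace(X, varX, Y, varY)
print(X, varX)  # [3. 4.] [1.5 1.5]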
import numpy as np
def inv_logtransform(plog):
""" Transform the power spectrum for the log field to the power spectrum of delta.
Inputs
------
plog - power spectrum of log field computed at points on a Fourier grid
Outputs
-------
p - power spectrum of the delta field
"""
xi_log = np.fft.ifftn(plog)
xi = np.exp(xi_log) - 1
p = np.fft.fftn(xi).real.astype('float')
return p | aaf414796e5dfd5ede71dd8e18017f46b7761a39 | 22,605 |
def ipv6_b85decode(encoded,
_base85_ords=RFC1924_ORDS):
"""Decodes an RFC1924 Base-85 encoded string to its 128-bit unsigned integral
representation. Used to base85-decode IPv6 addresses or 128-bit chunks.
Whitespace is ignored. Raises an ``OverflowError`` if stray characters
are found.
:param encoded:
RFC1924 Base85-encoded string.
:param _base85_ords:
(Internal) Look up table.
:returns:
A 128-bit unsigned integer.
"""
    if not isinstance(encoded, bytes):
raise TypeError("Encoded sequence must be bytes: got %r" %
type(encoded).__name__)
# Ignore whitespace.
encoded = EMPTY_BYTE.join(encoded.split())
if len(encoded) != 20:
raise ValueError("Not 20 encoded bytes: %r" % encoded)
#uint128 = 0
#for char in encoded:
# uint128 = uint128 * 85 + _base85_ords[byte_ord(char)]
# Above loop unrolled to process 4 5-tuple chunks instead:
try:
#v, w, x, y, z = encoded[0:5]
# v = encoded[0]..z = encoded[4]
uint128 = ((((_base85_ords[encoded[0]] *
85 + _base85_ords[encoded[1]]) *
85 + _base85_ords[encoded[2]]) *
85 + _base85_ords[encoded[3]]) *
85 + _base85_ords[encoded[4]])
#v, w, x, y, z = encoded[5:10]
# v = encoded[5]..z = encoded[9]
uint128 = (((((uint128 * 85 + _base85_ords[encoded[5]]) *
85 + _base85_ords[encoded[6]]) *
85 + _base85_ords[encoded[7]]) *
85 + _base85_ords[encoded[8]]) *
85 + _base85_ords[encoded[9]])
#v, w, x, y, z = encoded[10:15]
# v = encoded[10]..z = encoded[14]
uint128 = (((((uint128 * 85 + _base85_ords[encoded[10]]) *
85 + _base85_ords[encoded[11]]) *
85 + _base85_ords[encoded[12]]) *
85 + _base85_ords[encoded[13]]) *
85 + _base85_ords[encoded[14]])
#v, w, x, y, z = encoded[15:20]
# v = encoded[15]..z = encoded[19]
uint128 = (((((uint128 * 85 + _base85_ords[encoded[15]]) *
85 + _base85_ords[encoded[16]]) *
85 + _base85_ords[encoded[17]]) *
85 + _base85_ords[encoded[18]]) *
85 + _base85_ords[encoded[19]])
except KeyError:
raise OverflowError("Cannot decode `%r -- may contain stray "
"ASCII bytes" % encoded)
if uint128 > UINT128_MAX:
raise OverflowError("Cannot decode `%r` -- may contain stray "
"ASCII bytes" % encoded)
return uint128
# I've left this approach in here to warn you to NOT use it.
# This results in a massive amount of calls to byte_ord inside
# tight loops. | 324ec9835c7228bf406a8b33450c530b7191c4a0 | 22,606 |
def relabel_sig(sig:BaseSignature, arg_map:TDict[str, str]=None,
new_vararg:str=None, kwarg_map:TDict[str, str]=None,
new_varkwarg:str=None,
output_map:TDict[str, str]=None) -> BaseSigMap:
"""
Given maps along which to rename signature elements, generate a new
signature and an associated signature mapping from the original signature to
the new signature.
"""
arg_map = {} if arg_map is None else arg_map
kwarg_map = {} if kwarg_map is None else kwarg_map
if output_map is not None and (not sig.has_fixed_outputs):
raise ValueError()
output_map = {} if output_map is None else output_map
defaults_key_map = {**arg_map, **kwarg_map}
ord_args = [(arg_map[name], tp) for name, tp in sig.ord_poskw]
vararg = None if sig.vararg is None else (new_vararg, sig.vararg[1])
kwargs = {kwarg_map[name]: tp for name, tp in sig.kw.items()}
varkwarg = None if sig.varkwarg is None else (new_varkwarg, sig.varkwarg[1])
if sig.has_fixed_outputs:
ord_outputs = [(output_map[name], tp) for name, tp in sig.ord_outputs]
fixed_outputs = True
else:
ord_outputs = None
fixed_outputs = False
defaults = {defaults_key_map[k]: v for k, v in sig.defaults.items()}
renamed_sig = Signature(
ord_poskw=ord_args, kw=kwargs,
ord_outputs=ord_outputs, vararg=vararg,
varkwarg=varkwarg, defaults=defaults,
fixed_outputs=fixed_outputs
)
sig_map = SigMap(source=sig, target=renamed_sig, kwarg_map=kwarg_map)
return sig_map | a6b7ab1f8e8d3104938a6b1cecc609fb8a3aa1a0 | 22,607 |
import functools
def inferred_batch_shape_tensor(batch_object,
bijector_x_event_ndims=None,
**parameter_kwargs):
"""Infers an object's batch shape from its parameters.
Each parameter contributes a batch shape of
`base_shape(parameter)[:-event_ndims(parameter)]`, where a parameter's
`base_shape` is its batch shape if it defines one (e.g., if it is a
Distribution, LinearOperator, etc.), and its Tensor shape otherwise,
and `event_ndims` is as annotated by
`batch_object.parameter_properties()[parameter_name].event_ndims`.
Parameters with structured batch shape
(in particular, non-autobatched JointDistributions) are not currently
supported.
Args:
batch_object: Python object, typically a `tfd.Distribution` or
`tfb.Bijector`. This must implement the method
`batched_object.parameter_properties()` and expose a dict
`batched_object.parameters` of the parameters passed to its constructor.
bijector_x_event_ndims: If `batch_object` is a bijector, this is the
(structure of) integer(s) value of `x_event_ndims` in the current context
(for example, as passed to `experimental_batch_shape`). Otherwise, this
argument should be `None`.
Default value: `None`.
**parameter_kwargs: Optional keyword arguments overriding parameter values
in `batch_object.parameters`. Typically this is used to avoid multiple
Tensor conversions of the same value.
Returns:
batch_shape_tensor: `Tensor` broadcast batch shape of all parameters.
"""
batch_shapes = map_fn_over_parameters_with_event_ndims(
batch_object,
get_batch_shape_tensor_part,
bijector_x_event_ndims=bijector_x_event_ndims,
require_static=False,
**parameter_kwargs)
return functools.reduce(ps.broadcast_shape, tf.nest.flatten(batch_shapes), []) | d3c3a40f36c66ef28f6aeaddbdaa7dff5729f1ed | 22,608 |
import datetime
def fsevent_log(self, event_id_status_message):
"""Amend filesystem event history with a logging message
"""
event_id = event_id_status_message[0]
status = event_id_status_message[1]
message = event_id_status_message[2]
dbc = db_collection()
history_entry = {
'state': fsevents.UPDATED,
'message': message,
'timestamp': datetime.datetime.now()
}
dbc.update_one({'id': event_id}, {'$push': {
'history': history_entry
}},
upsert=True)
return (event_id, status) | 64cd88762e2775172bcf4c894c5187c373db3ee8 | 22,609 |