content | sha1 | id |
---|---|---|
stringlengths 35 to 762k | stringlengths 40 to 40 | int64 0 to 3.66M |
def create_decode_network(width=width, height=height, Din=Din, Dout=Dout, d_range=d_range):
"""
data flow with traffic on:
input IO ->
tag horn ->
(pre-fifo valve) ->
FIFO ->
(post-fifo valve) ->
TAT ->
AER_tx ->
neurons ->
AER_rx ->
(neuron output valve) ->
PAT ->
accumulator ->
(pre-fifo valve) ->
FIFO ->
(post-fifo valve) ->
TAT ->
tag funnel ->
output IO
"""
N = width * height
net = graph.Network("net")
min_d, max_d = d_range
decoders = np.ones((Dout, N)) * (max_d - min_d) + min_d
tap_matrix = np.zeros((N, Din))
if Din == 1:
# one synapse per 4 neurons
for x in range(0, width, 2):
for y in range(0, height, 2):
n = y * width + x
if x < width // 2:
tap_matrix[n, 0] = 1
else:
tap_matrix[n, 0] = -1
else:
        raise NotImplementedError("need to implement reasonable taps for Din > 1")
i1 = net.create_input("i1", Din)
p1 = net.create_pool("p1", tap_matrix)
b1 = net.create_bucket("b1", Dout)
o1 = net.create_output("o1", Dout)
net.create_connection("c_i1_to_p1", i1, p1, None)
decoder_conn = net.create_connection("c_p1_to_b1", p1, b1, decoders)
net.create_connection("c_b1_to_o1", b1, o1, None)
return net | bce65caa463bea8a582426bfe9fac08617fca812 | 8,504 |
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold) | df3ede87458939e7648090517828e2056cd9cfd6 | 8,505 |
def dGcthetalnorm(w: Wilson, cthetal):
"""Normalized distribution 1D cthetal"""
return tauBp / Btaul * dGcthetal(w, cthetal) | b925c6dad2dd6327f3fe250771c19018ecedcf14 | 8,507 |
from typing import Optional
def user_deposit_address_fixture(
deploy_smart_contract_bundle_concurrently: FixtureSmartContracts,
) -> Optional[UserDepositAddress]:
""" Deploy UserDeposit and fund accounts with some balances """
services_smart_contracts = deploy_smart_contract_bundle_concurrently.services_smart_contracts
if services_smart_contracts:
return services_smart_contracts.user_deposit_proxy.address
return None | 496f27fd9576191e91ac90c6e17c2b07fae629ab | 8,508 |
def vonNeumann(t, rho, H):
"""(quantum Liouville-)von Neumann equation"""
H = H(t)
rho = rho.reshape(H.shape)
rho_dot = -1j*(np.dot(H, rho) - np.dot(rho, H))
return rho_dot.flatten() | e00f9cdadacf36ba40240018d4b1dac1a7ebbba3 | 8,509 |
def nicer_array(a, mm_cutoff=0.3):
"""
Returns a scaled array, the scaling, and a unit prefix
Example:
nicer_array( np.array([2e-10, 3e-10]) )
Returns:
(array([200., 300.]), 1e-12, 'p')
"""
if np.isscalar(a):
x = a
elif len(a) == 1:
x = a[0]
else:
x = np.array(a)
fac, prefix = nicer_scale_prefix( x, mm_cutoff=mm_cutoff )
return a/fac, fac, prefix | e5abe6b45a4c80d8eb84d9f9f5aed1b11f19684e | 8,511 |
import uuid
import pickle
def build_playground():
"""
build a playground based on user's input building and algorithm type
input: userid, algorithm, target building
output: none
"""
userid, building, algo_type = request.form['userid'], request.form['building'], request.form['algo_type']
user = User.objects(userid=userid).first()
pgid = str(uuid.uuid4())
algo_instance = get_algo_instance(algo_type=algo_type, target_building=building, pgid=pgid)
algo_binaries = pickle.dumps(algo_instance, protocol=pickle.HIGHEST_PROTOCOL)
objs = RawMetadata.objects(building=building)
pg = Playground(
userid=userid,
pgid=pgid,
building=building,
algo_type=algo_type,
algo_model=algo_binaries,
playground_labeled_metadata=[]
).save()
# add playground to user's record
user.playground.append(pg)
user.save()
logger.info('build playground={} for user={}'.format(pg.pgid, user.userid))
message = {
'userid': userid,
'new_playground': pgid
}
resp = jsonify(message)
return resp | 31b73b3c505d27dca07569dc95c31d78822da452 | 8,512 |
def menuItemDirective(_context, menu, for_,
action, title, description=u'', icon=None, filter=None,
permission=None, layer=IDefaultBrowserLayer, extra=None,
order=0, item_class=None):
"""Register a single menu item."""
return menuItemsDirective(_context, menu, for_, layer).menuItem(
_context, action, title, description, icon, filter,
permission, extra, order, item_class) | 9ca19bd71cef30db9f8cd2a1154154965cf31b7d | 8,513 |
def getQueueStatistics ():
"""
Returns a 4-tuple containing the numbers of identifiers in the
Crossref queue by status: (awaiting submission, submitted,
registered with warning, registration failed).
"""
q = ezidapp.models.CrossrefQueue.objects.values("status").\
annotate(django.db.models.Count("status"))
d = {}
for r in q: d[r["status"]] = r["status__count"]
return (d.get("U", 0), d.get("S", 0), d.get("W", 0), d.get("F", 0)) | 2693365e24dc28b57ddbc8db5315779acee2d617 | 8,514 |
from typing import List
from typing import Union
def create_compressed_generator(
original_generator: CompressorArg,
compressed_cse_list: List[List[Union[List[uint64], List[Union[bytes, None, Program]]]]],
) -> BlockGenerator:
"""
Bind the generator block program template to a particular reference block,
template bytes offsets, and SpendBundle.
"""
start = original_generator.start
end = original_generator.end
program = DECOMPRESS_BLOCK.curry(
DECOMPRESS_PUZZLE, DECOMPRESS_CSE_WITH_PREFIX, Program.to(start), Program.to(end), compressed_cse_list
)
generator_arg = GeneratorArg(original_generator.block_height, original_generator.generator)
return BlockGenerator(program, [generator_arg]) | c2eb437caefa53452df61e1f5b4115ab4220a323 | 8,516 |
def run_mcmc(meas, x, nsamples, covm=None, scales=None):
"""
Sample the likelihood space with a Markov Chain Monte Carlo.
:param meas: TemplateMeasurement
measurement whose spectrum likelihood space is to be probe
:param x: [float]
parameter values where to start the chain
:param covm: [[float]]
covariance matrix values if sampling transformed space
:param scales: [float]
parameter scales if not sampling transformed space
:return: [float], [float], [float], pymcmc.MCMC
posterior mean, lower CI, upper CI for each parameter, and the MCMC
object used for sampling
"""
mcmc = MCMC(meas.spec.npars)
mcmc.set_values(x)
if covm is not None and scales is None:
mcmc.set_covm(covm)
elif scales is not None:
mcmc.set_scales(scales)
else:
raise ValueError("Must provide covariance OR scales")
mcmc.rescale = 2 # good starting point
mcmc.learn_scale(meas.spec.ll, 1000)
mcmc.run(meas.spec.ll, nsamples)
mean = list()
mean_down = list()
mean_up = list()
for ipar in range(meas.spec.npars):
mean.append(np.mean(mcmc.data[:, ipar]))
low, high, _, _ = npinterval.interval(mcmc.data[:, ipar], 0.6827)
mean_down.append(low-mean[-1])
mean_up.append(high-mean[-1])
return mean, mean_down, mean_up, mcmc | 79f7806d3c5c84693dfbfcd6d4236734ec7921de | 8,517 |
def get_vlan_list(dut, cli_type="click"):
"""
Get list of VLANs
Author : Prudvi Mangadu ([email protected])
:param dut:
:param cli_type:
:return:
"""
st.log("show vlan to get vlan list")
rv = show_vlan_config(dut, cli_type=cli_type)
vlan_list = list(set([eac['vid'] for eac in rv]))
return vlan_list | 5ce768bc8a30fa73fb2f4930384197535584de64 | 8,518 |
def begin_organization_creation_task(registered_id):
"""
Asynchronously create our tenant schema. Email owner when process completes.
"""
# Run the sub-routine for taking the OrganizationRegistration object
# creating our Tenant from it.
call_command('populate_organization', str(registered_id)) # foundation_public/management/commands/populate_organization.py
# Send email to the owner of the Organization letting them know we've successfully
# finished setting up their tenancy.
call_command('send_organization_ready_email', str(registered_id)) # foundation_email/management/commands/send_organization_ready_email.py
# Delete the registered organization.
PublicOrganizationRegistration.objects.get(id=registered_id).delete()
# Return nothing.
return None | fcaccc4e44def7a0d5ce83ac179899d0b288ac9c | 8,519 |
import itertools
def rewrite_blockwise(inputs):
"""Rewrite a stack of Blockwise expressions into a single blockwise expression
Given a set of Blockwise layers, combine them into a single layer. The provided
layers are expected to fit well together. That job is handled by
``optimize_blockwise``
Parameters
----------
inputs : List[Blockwise]
Returns
-------
blockwise: Blockwise
See Also
--------
optimize_blockwise
"""
if len(inputs) == 1:
# Fast path: if there's only one input we can just use it as-is.
return inputs[0]
inputs = {inp.output: inp for inp in inputs}
dependencies = {
inp.output: {d for d, v in inp.indices if v is not None and d in inputs}
for inp in inputs.values()
}
dependents = reverse_dict(dependencies)
new_index_iter = (
c + (str(d) if d else "") # A, B, ... A1, B1, ...
for d in itertools.count()
for c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
)
[root] = [k for k, v in dependents.items() if not v]
# Our final results. These will change during fusion below
indices = list(inputs[root].indices)
new_axes = inputs[root].new_axes
concatenate = inputs[root].concatenate
dsk = dict(inputs[root].dsk)
changed = True
while changed:
changed = False
for i, (dep, ind) in enumerate(indices):
if ind is None:
continue
if dep not in inputs:
continue
changed = True
# Replace _n with dep name in existing tasks
# (inc, _0) -> (inc, 'b')
dsk = {k: subs(v, {blockwise_token(i): dep}) for k, v in dsk.items()}
# Remove current input from input indices
# [('a', 'i'), ('b', 'i')] -> [('a', 'i')]
_, current_dep_indices = indices.pop(i)
sub = {
blockwise_token(i): blockwise_token(i - 1)
for i in range(i + 1, len(indices) + 1)
}
dsk = subs(dsk, sub)
# Change new input_indices to match give index from current computation
# [('c', j')] -> [('c', 'i')]
new_indices = inputs[dep].indices
sub = dict(zip(inputs[dep].output_indices, current_dep_indices))
contracted = {
x
for _, j in new_indices
if j is not None
for x in j
if x not in inputs[dep].output_indices
}
extra = dict(zip(contracted, new_index_iter))
sub.update(extra)
new_indices = [(x, index_subs(j, sub)) for x, j in new_indices]
# Update new_axes
for k, v in inputs[dep].new_axes.items():
new_axes[sub[k]] = v
# Bump new inputs up in list
sub = {}
# Map from (id(key), inds or None) -> index in indices. Used to deduplicate indices.
index_map = {(id(k), inds): n for n, (k, inds) in enumerate(indices)}
for i, index in enumerate(new_indices):
id_key = (id(index[0]), index[1])
if id_key in index_map: # use old inputs if available
sub[blockwise_token(i)] = blockwise_token(index_map[id_key])
else:
index_map[id_key] = len(indices)
sub[blockwise_token(i)] = blockwise_token(len(indices))
indices.append(index)
new_dsk = subs(inputs[dep].dsk, sub)
# indices.extend(new_indices)
dsk.update(new_dsk)
# De-duplicate indices like [(a, ij), (b, i), (a, ij)] -> [(a, ij), (b, i)]
# Make sure that we map everything else appropriately as we remove inputs
new_indices = []
seen = {}
sub = {} # like {_0: _0, _1: _0, _2: _1}
for i, x in enumerate(indices):
if x[1] is not None and x in seen:
sub[i] = seen[x]
else:
if x[1] is not None:
seen[x] = len(new_indices)
sub[i] = len(new_indices)
new_indices.append(x)
sub = {blockwise_token(k): blockwise_token(v) for k, v in sub.items()}
dsk = {k: subs(v, sub) for k, v in dsk.items()}
indices_check = {k for k, v in indices if v is not None}
numblocks = toolz.merge([inp.numblocks for inp in inputs.values()])
numblocks = {k: v for k, v in numblocks.items() if v is None or k in indices_check}
# Update IO-dependency information
io_deps = {}
for v in inputs.values():
io_deps.update(v.io_deps)
return Blockwise(
root,
inputs[root].output_indices,
dsk,
new_indices,
numblocks=numblocks,
new_axes=new_axes,
concatenate=concatenate,
annotations=inputs[root].annotations,
io_deps=io_deps,
) | dc80aa6c55d3ac6fafe780e5c58f3961d5d92b66 | 8,520 |
def sort_drugs(processed_data, alpha_sort, **kwargs):
"""
Sorts all drug names, as primary keys of processed data dictionary. Sorting
is governed by primary criteria of decreasing cost, then secondary criteria
of alphabetical order. Secondary criteria ignores unsafe characters if
"alpha_sort" is True; and does not ignore unsafe characters if False.
Requires sort_criteria() inner function.
Args:
processed_data (dictionary): contains all analyzed data. Primary key
is drug name (string), and primary value is tuple containing
number of prescribers (integer, index 0) and total cost (float,
index 1).
alpha_sort (boolean): if True, special characters are not considered
during sorting. If False, special characters are considered during
sorting.
safe_char (list of strings): contains all characters considered safe.
Returns:
all_drugs_sorted (list of strings): contains all drug names in
sequential list sorted by drug cost and alphanumeric name.
"""
def sort_criteria(drug):
"""
Determines mapped sorting value of cost and alphanumeric name for
all drugs, as keys of processed data dictionary. Required by
sort_drugs() outer function.
Args:
drug (string): drug name.
Returns:
(tuple): ordered and mapped sorting criteria of cost and name.
"""
# Sets first criteria of decreasing drug cost
cost_criteria = - processed_data[drug][1]
# Sets second criteria of alphanumeric drug name
name_criteria = drug.upper()
# If True, does not consider special characters in alphanumeric order
if alpha_sort:
# Iterates over all characters in drug name
for char in drug:
# If character is not in safe list, remove from name criteria
if char not in safe_char:
# Removes special characters
name_criteria = name_criteria.replace(char,"")
# Returns primary and secondary sorting criteria
return (cost_criteria, name_criteria)
# Sets safe characters for evaluation of name criteria
safe_char = kwargs['ch']
# Sorts drug names by decreasing cost then alphanumeric order
all_drugs_sorted = sorted(processed_data, key=sort_criteria)
# Returns list of sorted drug names
return all_drugs_sorted | aa3727dc52f0204c7c39807982a998cc03fabd2d | 8,521 |
def log_k2ex_and_get_msg(ex, prefix, topology):
""" LOG K2 exception and extracted message. Return NLS message """
LOG.exception(ex)
detail = {}
k2msg = _("None")
if isinstance(ex, K2Error) and ex.k2response:
detail['Request_headers'] = ex.k2response.reqheaders
detail['Response_headers'] = ex.k2response.headers
detail['Response_body'] = ex.k2response.body
detail['Response_status'] = ex.k2response.status
if hasattr(ex.k2response, 'k2err'):
m = ex.k2response.k2err.find('./Message')
if m is not None:
k2msg = m.text
msg = _("%(prefix)s ***K2 Operator Error***: %(ex_msg)s [K2 Error body "
"Message: %(k2msg)s]") %\
dict(prefix=prefix, ex_msg=ex, k2msg=k2msg)
LOG.error(msg)
if detail:
LOG.error(_("Error details: %s") % detail)
if topology is not None:
if 'error' in topology:
topology['error'].append(msg)
else:
topology['error'] = [msg]
return msg | a1a827ac38980e593e58236ce8d60eb01b957050 | 8,522 |
def fetch_ticket(identifier):
"""Return data of ticket with given identifier as pandas dataframe."""
try:
return pd.read_csv(f'./data/tickets/{identifier}.csv')
except:
return None | 46d776eab0e7867dd14079147a6101c9b8fddfa5 | 8,523 |
import torch
import torch.nn.functional as F
def dice_loss(logits, targets, smooth=1.0):
"""
logits: (torch.float32) shape (N, C, H, W)
targets: (torch.float32) shape (N, H, W), value {0,1,...,C-1}
"""
outputs = F.softmax(logits, dim=1)
targets = torch.unsqueeze(targets, dim=1)
    targets = torch.zeros_like(logits).scatter_(dim=1, index=targets.type(torch.int64), value=1.0)
inter = outputs * targets
dice = 1 - ((2*inter.sum(dim=(2,3)) + smooth) / (outputs.sum(dim=(2,3))+targets.sum(dim=(2,3)) + smooth))
return dice.mean() | 4ac40e87fe048dbc3232bb82c7fa16d9c03a8439 | 8,524 |
import math
def make_axis_angle_matrix(axis, angle):
"""construct a matrix that rotates around axis by angle (in radians)"""
#[RMS] ported from WildMagic4
fCos = math.cos(angle)
fSin = math.sin(angle)
fX2 = axis[0]*axis[0]
fY2 = axis[1]*axis[1]
fZ2 = axis[2]*axis[2]
fXYM = axis[0]*axis[1]*(1-fCos)
fXZM = axis[0]*axis[2]*(1-fCos)
fYZM = axis[1]*axis[2]*(1-fCos)
fXSin = axis[0]*fSin
fYSin = axis[1]*fSin
fZSin = axis[2]*fSin
return ( fX2*(1-fCos)+fCos, fXYM-fZSin, fXZM+fYSin, fXYM+fZSin, fY2*(1-fCos)+fCos, fYZM-fXSin, fXZM-fYSin, fYZM+fXSin, fZ2*(1-fCos)+fCos ) | 1bef075e63b26559184025a69f47d8c1b6dccf1d | 8,527 |
def get_agent_type_from_project_type():
""" use project type to determine agent type """
if 'METRIC' in if_config_vars['project_type']:
if if_config_vars['is_replay']:
return 'MetricFileReplay'
else:
return 'CUSTOM'
elif if_config_vars['is_replay']:
return 'LogFileReplay'
else:
return 'LogStreaming'
# INCIDENT and DEPLOYMENT don't use this | a2ea351fcba68dde4db2b9200636c937a58ab960 | 8,528 |
import traceback
def close_server(is_rebooting = False):
"""
Close the Unity server and tell clients to react appropriately.
Set `is_rebooting` to handle cases like domain reload when Unity is expected to come back shortly.
Returns True if the server was closed by this call, False if it was already closed.
"""
global server
global clients
if server is None:
return False
# Tell all the clients to quit.
client_shutdown_async = []
clients_to_shutdown = []
with clients_lock:
for client_list in clients.values():
for c in client_list:
try:
shutdown_result = c.async_shutdown(is_rebooting)
# Give the client a half-second to tell us there was a problem.
# If they don't tell us in that time, we just ignore the problem.
shutdown_result.set_expiry(0.5)
client_shutdown_async.append(shutdown_result)
clients_to_shutdown.append(c)
except EOFError:
pass
for a in client_shutdown_async:
try:
a.wait()
a.value
except EOFError:
# The client shut down when we told it to shut down -- pretty normal.
pass
except:
print("Exception while shutting down a client: {}".format(traceback.format_exc()))
server.close()
# Process all jobs pending. Client threads might be waiting for jobs to be
# run on the main thread
while not jobs.empty():
process_jobs();
server.thread.join()
for c in clients_to_shutdown:
c.wait_for_thread()
# Finally release the lock file.
server.lockfile.release()
server = None
clients = dict()
return True | 77fbd9ecd8ed7489d4f5763c5bb417c7cb5ddb15 | 8,529 |
import types
def dict_decode(node_dict: dict) -> Node:
"""Convert a dictionary to an `Entity` node (if it has a `type` item)."""
if "type" not in node_dict:
return node_dict
node_type = node_dict.pop("type")
class_ = getattr(types, node_type, None)
if class_ is None:
return node_dict
node_kwargs = {}
for key, val in node_dict.items():
if isinstance(val, dict):
val = dict_decode(val)
elif isinstance(val, list):
processed_list = []
for sub_val in val:
if isinstance(sub_val, dict):
processed_list.append(dict_decode(sub_val))
else:
processed_list.append(sub_val)
val = processed_list
node_kwargs[key] = val
return class_(**node_kwargs) | 00b790e431cdf080c0a6220c2913fd511983904d | 8,530 |
from datetime import datetime
def compute_purges(snapshots, pattern, now):
"""Return the list of snapshots to purge,
given a list of snapshots, a purge pattern and a now time
"""
snapshots = sorted(snapshots)
pattern = sorted(pattern, reverse=True)
purge_list = []
max_age = pattern[0]
# Age of the snapshots in minutes.
# Example : [30, 70, 90, 150, 210, ..., 4000]
snapshots_age = []
valid_snapshots = []
for s in snapshots:
try:
snapshots_age.append(
int((now - datetime.strptime(
s.split('@')[1], DTFORMAT)).total_seconds()
)/60)
valid_snapshots.append(s)
except:
log.info("Skipping purge of %s with invalid date format", s)
continue
if not valid_snapshots:
return purge_list
# pattern = 3600:180:60
# age segments = [(3600, 180), (180, 60)]
for age_segment in [(pattern[i], pattern[i+1])
for i, p in enumerate(pattern[:-1])]:
last_timeframe = -1
for i, age in enumerate(snapshots_age):
# if the age is outside the age_segment, delete nothing.
# Only 70 and 90 are inside the age_segment (60, 180)
if age > age_segment[0] < max_age or age < age_segment[1]:
continue
# Now get the timeframe number of the snapshot.
# Ages 70 and 90 are in the same timeframe (70//60 == 90//60)
timeframe = age // age_segment[1]
# delete if we already had a snapshot in the same timeframe
# or if the snapshot is very old
if timeframe == last_timeframe or age > max_age:
purge_list.append(valid_snapshots[i])
last_timeframe = timeframe
return purge_list | 710a65ef7068d57470fb72ff171a1f1eb3480d65 | 8,531 |
import logging
def design_partial_factorial(k: int, res: int) -> DataFrame:
"""
design_partial_factorial
This function helps design 2 level partial factorial experiments. These experiments are often
described using the syntax l**(k-p) where l represents the level of each factor, k represents
the total number of factors considered, and p represents a scaling factor relative to the full
factorial design.
This function assumes that l=2. Users are not asked to set p, instead the user sets a minimum
desired resolution for their experiment. Resolution describes the kind of aliasing incurred by
scaling down from a full to a partial factorial design. Higher resolutions have less potential
aliasing (confounding).
Resolution number is determined through the defining relation of the partial factorial design.
For the 6 factor design 2**(6-p) with factors ABCDEF, example defining relations (I) are shown
below. The resolution cannot exceed the number of factors in the experiment. So a 6 factor
experiment can be at most a resolution 6 (otherwise it would be a full factorial experiment).
* Res I: I = A
* Res II: I = AB
* Res III: I = ABC
* Res IV: I = ABCD
* Res V: I = ABCDE
* Res VI: I = ABCDEF
Practically we tend to use resolution III-, IV- and V-designs.
* Res I: Cannot distinguish between levels within main effects (not useful).
* Res II: Main effects may be aliased with other main effects (not useful).
* Res III: Main effects may be aliased with two-way interactions.
* Res IV: Two-way interactions may be aliased with each other.
* Res V: Two-way interactions may be aliased with three-way interactions.
* Res VI: Three-way interactions may be aliased with each other.
Parameters
----------
k : int
the total number of factors considered in the experiment
res : int
the desired minimum resolution of the experiment
Returns
-------
pd.DataFrame
A dataframe with the partial factorial design
Examples
--------
>>> # create partial factorial design for a 2 level 4 factor resolution III experiment
>>> design_df = design_partial_factorial(k=4, res=3)
"""
_check_int_input(k, "k")
_check_int_input(res, "res")
assert res <= k, "Resolution must be smaller than or equal to the number of factors."
# Assume l=2 and use k specified by user to solve for p in design
n = arange(res - 1, k, 1)
k_minus_p = k - 1 if res == k else n[~(_k_combo_vec(n, res) < k)][0]
logging.info("Partial Factorial Design: l=2, k={}, p={}".format(k, k - k_minus_p))
logging.info("Ratio to Full Factorial Design: {}".format(Fraction(2**k_minus_p / 2**k)))
# identify the main effects and interactions for the design
main_factors = arange(k_minus_p)
clean = lambda x: x.replace(" ", " ").strip(" ").replace(" ", ":")
interactions = [clean(_array_to_string(main_factors))] if res == k else \
[
clean(_array_to_string(c))
for r in range(res - 1, k_minus_p)
for c in combinations(main_factors, r)
][:k - k_minus_p]
# combine main effects and interactions into a single design string (format inspired by patsy)
factors = " ".join([_array_to_string(main_factors)] + interactions)
logging.info("Design string: {}".format(factors))
main_factors = [i for i in factors.split(" ") if i and ":" not in i]
two_level_full_factorial = [[-1, 1] for _ in main_factors]
full_factorial_design = design_full_factorial(two_level_full_factorial)
interactions = [
["x" + i for i in j.split(":")]
for j in [i for i in factors.split(" ") if i and ":" in i]
]
design = "+".join(full_factorial_design.columns.tolist() + [":".join(i) for i in interactions])
partial_factorial_design = dmatrix(design, full_factorial_design, return_type='dataframe').drop(
columns=["Intercept"], axis=1)
partial_factorial_design.columns = \
["x{}".format(i) for i in range(partial_factorial_design.shape[1])]
return partial_factorial_design | a9c93cf696c33f0eb74cb092d1f340d5732dc994 | 8,532 |
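A hedged usage sketch: a 2-level, 6-factor design with minimum resolution IV. The classic 2**(6-2) resolution-IV fraction needs only 16 runs instead of the 64 of the full factorial; the exact fraction returned depends on this module's helper functions.

design = design_partial_factorial(k=6, res=4)
print(design.shape)    # expected (16, 6) for the standard 2**(6-2) fraction
print(design.head())   # factor levels coded as -1 / +1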
def get_trending_queries(filename):
"""Extract trends from a file."""
f = open(filename, 'r')
trend_tuples_list = []
for line in f:
trend_tuples_list.append(tuple((line.strip()).split(',')))
f.close()
return trend_tuples_list | 6f5828d4bf0092c0a43804ca7ffb9ee4aa67e607 | 8,534 |
def get_bio(x, lang='en'):
"""Get the one-sentence introduction"""
bio = x.loc[16][lang]
return bio | 8c9ddabd2e6ada790af2b85a3fb656291f3ee5bd | 8,535 |
import io
def create_tf_example(filename, source_id, encoded_jpeg, annotations, resize=True):
"""
This function creates a tf.train.Example in object detection api format from a Waymo data frame.
args:
- filename [str]: name of the original tfrecord file
- source_id [str]: original image source id (here: frame context name + camera name + frame index)
- encoded_jpeg [bytes]: jpeg encoded image
- annotations [protobuf object]: bboxes and classes
returns:
- tf_example [tf.Train.Example]: tf example in the objection detection api format.
"""
if not resize:
encoded_jpg_io = io.BytesIO(encoded_jpeg)
image = Image.open(encoded_jpg_io)
width, height = image.size
width_factor, height_factor = image.size
else:
image_tensor = tf.io.decode_jpeg(encoded_jpeg)
height_factor, width_factor, _ = image_tensor.shape
image_res = tf.cast(tf.image.resize(image_tensor, (640, 640)), tf.uint8)
encoded_jpeg = tf.io.encode_jpeg(image_res).numpy()
width, height = 640, 640
mapping = {1: 'vehicle', 2: 'pedestrian', 4: 'cyclist'}
image_format = b'jpg'
xmins = []
xmaxs = []
ymins = []
ymaxs = []
classes_text = []
classes = []
filename = filename.encode('utf8') # convert to bytes in utf8 format
source_id = source_id.encode('utf8') # convert to bytes in utf8 format
for ann in annotations:
xmin, ymin = ann.box.center_x - 0.5 * ann.box.length, ann.box.center_y - 0.5 * ann.box.width
xmax, ymax = ann.box.center_x + 0.5 * ann.box.length, ann.box.center_y + 0.5 * ann.box.width
xmins.append(xmin / width_factor)
xmaxs.append(xmax / width_factor)
ymins.append(ymin / height_factor)
ymaxs.append(ymax / height_factor)
classes.append(ann.type)
classes_text.append(mapping[ann.type].encode('utf8'))
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': int64_feature(height),
'image/width': int64_feature(width),
'image/filename': bytes_feature(filename),
'image/source_id': bytes_feature(source_id),
'image/encoded': bytes_feature(encoded_jpeg),
'image/format': bytes_feature(image_format),
'image/object/bbox/xmin': float_list_feature(xmins),
'image/object/bbox/xmax': float_list_feature(xmaxs),
'image/object/bbox/ymin': float_list_feature(ymins),
'image/object/bbox/ymax': float_list_feature(ymaxs),
'image/object/class/text': bytes_list_feature(classes_text),
'image/object/class/label': int64_list_feature(classes),
}))
return tf_example | b757fc1e4d51fac5722eb170357ea36388d40d5d | 8,536 |
import re
def format_oids(oids_parameters):
"""
Format dictionary OIDs to ``cryptography.x509.oid.NameOID`` object list
:param oids_parameters: CA Object Identifiers (OIDs).
The are typically seen in X.509 names.
Allowed keys/values:
``'country_name': str (two letters)``,
``'locality_name': str``,
``'state_or_province': str``,
``'street_address': str``,
``'organization_name': str``,
``'organization_unit_name': str``,
``'email_address': str``,
:type oids_parameters: dict, required
:return: ``cryptography.x509.oid.NameOID`` object list
:rtype: object ``cryptography.x509.oid.NameOID`` object list
"""
oids = list()
for oid in oids_parameters:
if oid in OIDS:
current_oid = oids_parameters[oid]
if type(current_oid) is not str:
raise TypeError(f"'{oid}' must be str")
if oid == "country_name":
# country name ISO 3166-1 (alfa-2)
if not re.match(COUNTRY_REGEX, current_oid):
raise OwnCAInvalidOID(
f"'{oid}' must be ISO 3166-1 (alfa-2)"
)
else:
oids.append(
x509.NameAttribute(NameOID.COUNTRY_NAME, current_oid)
)
elif oid == "locality_name":
oids.append(
x509.NameAttribute(NameOID.LOCALITY_NAME, current_oid)
)
elif oid == "state_or_province":
oids.append(
x509.NameAttribute(
NameOID.STATE_OR_PROVINCE_NAME, current_oid
)
)
elif oid == "street_address":
oids.append(
x509.NameAttribute(NameOID.STREET_ADDRESS, current_oid)
)
elif oid == "organization_name":
oids.append(
x509.NameAttribute(NameOID.ORGANIZATION_NAME, current_oid)
)
elif oid == "organization_unit_name":
oids.append(
x509.NameAttribute(
NameOID.ORGANIZATIONAL_UNIT_NAME, current_oid
)
)
elif oid == "email_address":
oids.append(
x509.NameAttribute(NameOID.EMAIL_ADDRESS, current_oid)
)
else:
raise OwnCAInvalidOID(
f"The '{oid}' is Invalid. Allowed OIDs: {', '.join(OIDS)}."
)
return oids | 08641ffb1c431c13e23f2b9498ce1cb1a896f955 | 8,537 |
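A hedged usage sketch for format_oids; the key names follow the docstring above and the values are placeholders.

oids = format_oids({
    "country_name": "BR",
    "state_or_province": "Sao Paulo",
    "locality_name": "Sao Paulo",
    "organization_name": "Example Org",
    "organization_unit_name": "Security",
    "email_address": "[email protected]",
})
# The resulting list of x509.NameAttribute objects can then be combined with a
# common name into an x509.Name for a CA or CSR subject.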
def Phases(*args):
"""Number of phases"""
# Getter
if len(args) == 0:
return lib.Generators_Get_Phases()
# Setter
Value, = args
lib.Generators_Set_Phases(Value) | d1610c5b2ab19cf2b3018850fe685bb9fcbc11ad | 8,538 |
import requests
def create_channel(logger: Logger,
connection: komand.connection,
team_id: str,
channel_name: str,
description: str) -> bool:
"""
Creates a channel for a given team
:param logger: (logging.logger)
:param connection: Object (komand.connection)
:param team_id: String
:param channel_name: String
:param description: String
:return: boolean
"""
create_channel_endpoint = f"https://graph.microsoft.com/beta/teams/{team_id}/channels"
    create_channel_payload = {
        "description": description,
        "displayName": channel_name
    }
    headers = connection.get_headers()
    logger.info(f"Creating channel with: {create_channel_endpoint}")
    result = requests.post(create_channel_endpoint, json=create_channel_payload, headers=headers)
try:
result.raise_for_status()
except Exception as e:
raise PluginException(cause=f"Create channel {channel_name} failed.",
assistance=result.text) from e
if not result.status_code == 201:
raise PluginException(cause=f"Create channel returned an unexpected result.",
assistance=result.text)
return True | 6cdd37a7fdc131433f9f75ba10e523bc719a34aa | 8,539 |
def activate_user(username):
"""Activate a user account."""
user = annotator.credentials.find_one({'username': username})
if not user['active']:
annotator.credentials.update_one(user, {'$set': {'active': True}})
flash("User {0} activated successfully".format(username), 'success')
else:
flash("User {0} is already active".format(username), 'warning')
return redirect(url_for('manage_users')) | 58b70edc4a098a7409e1c2e62f9710b3da3c95af | 8,541 |
def query_all():
"""Queries all matches in Elasticsearch, to be used further for suggesting
product names when a user is not aware of them.
"""
query_all = {
"query": {"match_all": {}},
}
return query_all | 9d15297cf82d813ff0a0688f5c25e2ca6fa145d3 | 8,542 |
def _mesh_homogeneous_cell(cell_vect, mesh_path):
"""Generate a simple mesh for a homogeneous cell.
cell_vect: np.array 2x2 colonnes = vecteurs periodicité
"""
name = mesh_path.stem
geometry.init_geo_tools()
geometry.set_gmsh_option("Mesh.MshFileVersion", 4.1)
# Mesh.Algorithm = 6; Frontal - Delaunay for 2D meshes
geometry.set_gmsh_option("Mesh.Algorithm", 6)
geometry.set_gmsh_option("Mesh.MeshSizeMin", 0.05)
geometry.set_gmsh_option("Mesh.MeshSizeMax", 0.05)
rve = Gmsh2DRVE([], cell_vect, (1, 1), np.zeros(2), [], False, name)
rve.mesh_generate()
gmsh.model.mesh.renumberNodes()
gmsh.model.mesh.renumberElements()
gmsh.write(str(mesh_path))
mesh_path = msh_conversion(mesh_path, ".xdmf")
geometry.reset()
return mesh_path | 98c63d7764bcca7baad81de1fe7c3fac16ff6ffd | 8,543 |
from typing import Dict
from typing import Union
from typing import Optional
from typing import List
from typing import Tuple
from typing import cast
from typing import Any
import json
def fetch_incidents(client: Client, max_incidents: int,
last_run: Dict[str, Union[Optional[int], Optional[str]]], first_fetch: Optional[int],
priority: Optional[str], activity_status: Optional[str],
progress_status: Optional[str], business_units: Optional[str], issue_types: Optional[str],
tags: Optional[str], cloud_management_status: Optional[str],
mirror_direction: Optional[str], sync_tags: Optional[List[str]],
fetch_details: Optional[bool]
) -> Tuple[Dict[str, Union[Optional[int], Optional[str]]], List[dict]]:
"""This function retrieves new alerts every interval (default is 1 minute).
This function has to implement the logic of making sure that incidents are
fetched only onces and no incidents are missed. By default it's invoked by
XSOAR every minute. It will use last_run to save the timestamp of the last
incident it processed. If last_run is not provided, it should use the
integration parameter first_fetch to determine when to start fetching
the first time. Uses "createdAfter" in the Expanse API for timestamp.
:return:
A tuple containing two elements:
next_run (``Dict[str, int]``): Contains the timestamp that will be
used in ``last_run`` on the next fetch, and the last issue id.
incidents (``List[dict]``): List of incidents that will be created in XSOAR
:rtype: ``Tuple[Dict[str, Union[Optional[int], Optional[str]]], List[dict]]``
"""
last_fetch = last_run.get('last_fetch')
if last_fetch is None:
last_fetch = cast(int, first_fetch)
else:
last_fetch = cast(int, last_fetch)
latest_created_time = last_fetch
last_issue_id = last_run.get('last_issue_id')
latest_issue_id: Optional[str] = None
incidents: List[Dict[str, Any]] = []
arg_list = argToList(priority)
if arg_list and not all(i in ISSUE_PRIORITY for i in arg_list):
raise ValueError(f'priority must include: {", ".join(ISSUE_PRIORITY)}')
_priority = ','.join(arg_list)
arg_list = argToList(progress_status)
if arg_list and not all(i in ISSUE_PROGRESS_STATUS for i in arg_list):
raise ValueError(f'progressStatus must include: {", ".join(ISSUE_PROGRESS_STATUS)}')
_progress_status = ','.join(arg_list)
arg_list = argToList(activity_status)
if arg_list and not all(i in ISSUE_ACTIVITY_STATUS for i in arg_list):
raise ValueError(f'activityStatus must include: {", ".join(ISSUE_ACTIVITY_STATUS)}')
_activity_status = ','.join(arg_list)
arg_list = argToList(cloud_management_status)
if arg_list and not all(i in CLOUD_MANAGEMENT_STATUS for i in arg_list):
raise ValueError(f'cloudManagementStatus must include: {", ".join(CLOUD_MANAGEMENT_STATUS)}')
_cloud_management_status = ','.join(arg_list)
created_after = timestamp_us_to_datestring_utc(latest_created_time, DATE_FORMAT)
r = client.get_issues(
limit=max_incidents if not last_issue_id else max_incidents + 1, # workaround to avoid unnecessary API calls
priority=_priority, business_units=business_units,
progress_status=_progress_status, activity_status=_activity_status, tags=tags,
issue_type=issue_types, cloud_management_status=_cloud_management_status,
created_after=created_after, sort='created'
)
broken = False
issues: List = []
skip = cast(str, last_issue_id)
for i in r:
if skip and not broken:
if 'id' not in i or 'created' not in i:
continue
# fix created time to make sure precision is the same to microsecond with no rounding
i['created'] = timestamp_us_to_datestring_utc(datestring_to_timestamp_us(i['created']), DATE_FORMAT)
if i['created'] != created_after:
issues.append(i)
broken = True
elif i['id'] == skip:
broken = True
else:
issues.append(i)
if len(issues) == max_incidents:
break
for issue in issues:
ml_feature_list: List[str] = []
if 'created' not in issue or 'id' not in issue:
continue
incident_created_time = datestring_to_timestamp_us(issue.get('created'))
if last_fetch:
if incident_created_time < last_fetch:
continue
incident_name = issue.get('headline') if 'headline' in issue else issue.get('id')
# Mirroring
issue['xsoar_mirroring'] = {
'mirror_direction': mirror_direction,
'mirror_id': issue.get('id'),
'mirror_instance': demisto.integrationInstance(),
'sync_tags': sync_tags
}
issue['xsoar_severity'] = convert_priority_to_xsoar_severity(issue.get('priority', 'Unknown'))
# Handle asset information
issue['assets'], ml_feature_list, _ = client.parse_asset_data(issue, fetch_details)
# add issue specific information to ml key
if (
(provider := issue.get('providers'))
and isinstance(provider, list)
and 'name' in provider[0]
):
ml_feature_list.append(provider[0].get('name'))
if (
(latest_evidence := issue.get('latestEvidence'))
and isinstance(latest_evidence, dict)
):
if (
(geolocation := latest_evidence.get('geolocation'))
and isinstance(geolocation, dict)
):
for f in ['countryCode', 'city']:
if (x := geolocation.get(f)):
ml_feature_list.append(x)
# dedup, sort and join ml feature list
issue['ml_features'] = ' '.join(sorted(list(set(ml_feature_list))))
incident = {
'name': incident_name,
'details': issue.get('helpText'),
'occurred': issue.get('created'),
'rawJSON': json.dumps(issue),
'severity': issue.get('xsoar_severity')
}
latest_issue_id = issue.get('id')
incidents.append(incident)
if incident_created_time > latest_created_time:
latest_created_time = incident_created_time
next_run = {
'last_fetch': latest_created_time,
'last_issue_id': latest_issue_id if latest_issue_id else last_issue_id}
return next_run, incidents | e273d69611331c9f2eb5b2c0c9c27e805c9d7e4f | 8,545 |
import collections
def extractWordFeatures(x):
"""
Extract word features for a string x. Words are delimited by
whitespace characters only.
@param string x:
@return dict: feature vector representation of x.
Example: "I am what I am" --> {'I': 2, 'am': 2, 'what': 1}
"""
# BEGIN_YOUR_CODE (our solution is 4 lines of code, but don't worry if you deviate from this)
mydict = collections.defaultdict(float)
for s in x.split(' '):
if s.isalnum() and s[0:4] != "http":
mydict[s] += 1
return mydict
# END_YOUR_CODE | dd5247dbf7ef69043b200acbefec996107de00f7 | 8,546 |
def delete_user(user_id):
"""
Delete user specified in user ID
Note: Always return the appropriate response for the action requested.
"""
user = mongo_mgr.db.user.find_one({'_id': user_id})
if user:
        mongo_mgr.db.user.delete_one({'_id': user_id})
result = {'id': user_id}
else:
result = "No result."
return jsonify({'result': result}) | b3244aeafcddd6c5be1d209c89ef7ed7969da989 | 8,547 |
from operator import and_
import logging
def query_attention_one(**kwargs):
"""
    Check whether the current user follows the specified object
:param kwargs: {'user_id': user_id, 'object_id': object_id}
:return: 0 or 1
"""
session = None
try:
session = get_session()
results = session.query(func.count('*')).filter(and_(Attention.OPEN_ID == kwargs['user_id'],
Attention.OBJECT_ID == kwargs['object_id'])).scalar()
        # Commit to persist to the database
session.commit()
        logging.info('OK : attention.py--->query_attention_one() succeeded')
return str(results)
    except Exception as e:
        logging.critical('Error : attention.py--->query_attention_one() failed: {}'.format(e))
        return RESULT_ERROR
    finally:
        if session is not None:
            session.close()
def _dict_empty_map_helper(values, empty, delim, av_separator, v_delimiter,
parser):
"""
A helper to consolidate logic between singleton and non-singleton mapping.
Args:
values: The value to parse.
empty: The empty representation for this value in CoNLL-U format.
delim: The delimiter between components of the value.
av_separator: The separator between attribute and value in each
component.
v_delimiter: The delimiter between values for the same attribute.
parser: The parser of the value from the attribute value pair.
Returns:
An empty dict if the value is empty and otherwise a parsed equivalent.
Raises:
ParseError: If the dict format was unable to parsed. This error will be
raised by the provided parser.
"""
if values == empty:
return {}
d = {}
for el in values.split(delim):
parts = el.split(av_separator, 1)
if len(parts) == 1 or (len(parts) == 2 and parts[1] == ''):
k = parts[0]
v = None
elif len(parts) == 2:
k, v = parts
parsed = parser(v, v_delimiter)
d[k] = parsed
return d | cb5550eb606beb47f31236b827e78f2a7fc4ba40 | 8,549 |
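A hedged usage sketch: parsing a CoNLL-U FEATS-style column with this helper, using a simple value parser defined here purely for illustration.

def _parse_value_set(v, v_delimiter):
    # None means the attribute had no value; otherwise split multi-values.
    return None if v is None else set(v.split(v_delimiter))

feats = _dict_empty_map_helper('Case=Nom|Number=Sing', empty='_', delim='|',
                               av_separator='=', v_delimiter=',',
                               parser=_parse_value_set)
# feats == {'Case': {'Nom'}, 'Number': {'Sing'}}
empty = _dict_empty_map_helper('_', empty='_', delim='|', av_separator='=',
                               v_delimiter=',', parser=_parse_value_set)
# empty == {}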
def get_restricted_area(path1, path2, restricted_pos1, restricted_pos2, time_step):
"""Computes the restricted area and the start- and end-time steps for both agents.
* start time-step: The first time step where an agent occupies a position within the restricted
area.
* end time-step: The last time step where an agent occupies a position with the restricted area
:param path1: Path (previous solution) from the first agent.
:param path2: Path (previous solution) from the second agent.
:param restricted_pos1: The first position which agent one would occupy within the restricted
area.
:param restricted_pos2: The first position which agent two would occupy within the restricted
area.
:param time_step: The time step where the agents would collide.
:return: The positions included within the restricted area, the start time steps for both agents
and the end time steps for both agents.
"""
sub_sequence1 = find_stop_position(path1[:time_step + 2][::-1], restricted_pos1)[::-1]
sub_sequence2 = find_stop_position(path2[:time_step + 2][::-1], restricted_pos2)
restricted_area = list(dict.fromkeys(sub_sequence1)) + list(dict.fromkeys(sub_sequence2))
# Determine time step where agent enters restricted area
fst_enter_r = find_stop_position(
list(zip(path1, range(len(path1))))[:time_step + 2], restricted_pos1
)[-1][1]
snd_enter_r = find_stop_position(
list(zip(path2, range(len(path2))))[:time_step + 2], restricted_pos2
)[-1][1]
start_time_steps = [fst_enter_r, snd_enter_r]
# Determine how long the agent remains within the restricted area
end_time_steps = []
for path, r, enter in [
(path1, restricted_area, fst_enter_r), (path2, restricted_area[::-1], snd_enter_r)
]:
path_idx = 0
for idx in range(len(restricted_area)):
# Agent might wait in the restricted area because of other constraints
while path_idx < len(path[enter:]) \
and path[enter:][path_idx] == path[enter:][path_idx - 1]:
path_idx += 1
# The last position of the agent is within the restricted area
if path_idx >= len(path[enter:]) - 1:
path_idx = len(path[enter:])
break
if path[enter:][path_idx] != r[idx]:
break
path_idx += 1
end_time_steps.append(path_idx)
end_time_steps[0] += start_time_steps[0]
end_time_steps[1] += start_time_steps[1]
return restricted_area, start_time_steps, end_time_steps | 39104a44e8d5354799e45feb1ba6371f3423fecc | 8,551 |
def FR_highpass(freq: np.ndarray, hp_freq: float,
trans_width: float) -> np.ndarray:
"""Frequency responce for highpass filter
Parameters
----------
``freq``: np.ndarray
frequency array
``hp_freq``: float
highpass frequency
``trans_width``: float
width of the transition region between bands
Returns
-------
np.ndarray
with values in [0, 1]
"""
sigma = trans_width / 6.
return 1 / (1 + np.exp((hp_freq - freq) / sigma)) | a57058a3fdf257ee68efe0c99d668e4f5b4fbf60 | 8,553 |
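A short usage sketch evaluating the high-pass response on a frequency grid; the cutoff and transition width below are illustrative.

import numpy as np

freq = np.linspace(0.0, 100.0, 1001)                          # Hz
response = FR_highpass(freq, hp_freq=30.0, trans_width=12.0)
# The logistic response is ~0 well below hp_freq, 0.5 at hp_freq, and ~1
# above it, with the transition spread over roughly trans_width.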
import subprocess

def _rexec(params):
"""Start a subprocess shell to execute the specified command and return its output.
params - a one element list ["/bin/cat /etc/hosts"]
"""
# check that params is a list
if not isinstance(params, list) or len(params) == 0:
return "Parameter must be a not empty list"
command = params[0]
try:
subprocess.check_call(command,shell=True)
out = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read()
return '\n' + out.decode()
    except Exception as e:
        print(e)
return "{\"msg\":\"Invalid command.\"}" | e8338dc94b177f5d39d5307a88da7aa040a3a7e1 | 8,554 |
def _get_compose_template(manifest):
"""
Build the service entry for each one of the functions in the given context.
Each docker-compose entry will depend on the same image and it's just a static
definition that gets built from a template. The template is in the artifacts
folder.
"""
artifact = get_artifact('compose-template.yml')
def build_section(label):
return [
{
'name': name,
'image': _get_docker_image(manifest, sls_section),
'volumes': _get_volumes(manifest, sls_section)
}
for name, sls_section in manifest.get(label, {}).items()
]
# Load the jinja template and build the sls functions and layers.
return Template(artifact).render(
functions=build_section('functions'),
layers=build_section('layers')
) | 659e28f97c76a386a20c85fadaa3d0bbd6d88a90 | 8,555 |
def _ParsePackageNode(package_node):
"""Parses a <package> node from the dexdump xml output.
Returns:
A dict in the format:
{
'classes': {
<class_1>: {
'methods': [<method_1>, <method_2>]
},
<class_2>: {
'methods': [<method_1>, <method_2>]
},
}
}
"""
classes = {}
for child in package_node:
if child.tag == 'class':
classes[child.attrib['name']] = _ParseClassNode(child)
return {'classes': classes} | 89eefebb82848acad23a9703b87177f626fbbdf5 | 8,556 |
def greet(lang):
"""This function is for printing a greeting in some
selected languages: Spanish, Swedish, and German"""
if lang == 'es':
return 'Hola'
elif lang == 'ge':
return 'Hallo'
elif lang == 'sv':
return 'Halla'
else:
return 'Hello' | dcbe0fb39e735666b36780ee8d06b457e0a9541e | 8,557 |
def add_hook(
show_original=False,
show_transformed=False,
predictable_names=False,
verbose_finder=False,
):
"""Creates and adds the import hook in sys.meta_path"""
callback_params = {
"show_original": show_original,
"show_transformed": show_transformed,
"predictable_names": predictable_names,
}
hook = import_hook.create_hook(
transform_source=transform_source,
callback_params=callback_params,
hook_name=__name__,
verbose_finder=verbose_finder,
)
return hook | efda58094ab2bb218dca8babcdbf0a74b97e0cd8 | 8,558 |
def correlate_two_dicts(xdict, ydict, subset_keys=None):
"""Find values with the same key in both dictionary and return two arrays of corresponding values"""
x, y, _ = correlate_two_dicts_verbose(xdict, ydict, subset_keys)
return x, y | 93287a57c7bf4e8cb03384531ffbca9c6d6e7cfc | 8,559 |
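A hedged usage sketch; the heavy lifting happens in correlate_two_dicts_verbose, which is not shown here.

xdict = {'cell_a': 1.0, 'cell_b': 2.0, 'cell_c': 3.0}
ydict = {'cell_b': 4.0, 'cell_c': 9.0, 'cell_d': 16.0}
x, y = correlate_two_dicts(xdict, ydict)
# x and y hold the values for the shared keys ('cell_b', 'cell_c') in
# matching order, ready for e.g. a correlation or scatter plot.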
def find_gateways(unicast_gateway, session, apic) -> tuple:
"""Search for ACI Gateways and get configurations"""
get_gateway = get_subnets(session, apic)
aps = []
epgs = []
l3Outs = []
gateways = []
    location, bridge_domain, uni_route, scope, unkwn_uni, tenant, bd_vrf, iplearn = None, "DoesntExist", None, None, None, None, None, None
    # Initialize the remaining returned values so they exist even if no BD matches
    mtu, learn_limit, mac = None, None, None
try:
# Locate subnet in ACI, get scope, map location
for fvSubnet in get_gateway.iter("fvSubnet"):
ip = fvSubnet.get("ip")
gateways.append(ip)
if unicast_gateway in ip:
location = fvSubnet.get("dn")
scope = fvSubnet.get("scope")
break
# Find BD, check to see if unicast routing is enable and unknown unicast setting is
for fvBD in get_gateway.iter("fvBD"):
bds = fvBD.get("name")
iplearn = fvBD.get("ipLearning")
mtu = fvBD.get("mtu")
learn_limit = fvBD.get("limitIpLearnToSubnets")
mac = fvBD.get("mac")
if location.rfind(bds) != -1:
bridge_domain = bds
uni_route = fvBD.get("unicastRoute")
unkwn_uni = fvBD.get("unkMacUcastAct")
# Find vrf associated with BD
for fvRsCtx in get_gateway.iter("fvRsCtx"):
vrf = fvRsCtx.get("tnFvCtxName")
location = fvRsCtx.get("dn")
if location.rfind(bridge_domain) != -1:
bd_vrf = vrf
# Find tenant, ap, and epgs, save to list
for fvRtBd in get_gateway.iter("fvRtBd"):
dn = fvRtBd.get("dn")
if dn.rfind(bridge_domain) != -1:
tenant = dn.split("/")[1].strip("tn-")
aps.append(dn.split("/")[5].strip("ap-"))
epgs.append(dn.split("/")[6].strip("epg-").strip("]"))
# Find L3outs, save to list
for fvRsBDToOut in get_gateway.iter("fvRsBDToOut"):
dn = fvRsBDToOut.get("dn")
if dn.rfind(bridge_domain) != -1:
l3Outs.append(dn.split("/")[3].strip("rsBDToOut-"))
# Find L3outs, save to list
for ipLearning in get_gateway.iter("ipLearning"):
iplearn = ipLearning.get("ipLearning")
except AttributeError:
pass
# Set variables from conditions
if aps:
join_aps = ', '.join(aps)
else:
join_aps = None
if epgs:
join_epgs = ', '.join(epgs)
else:
join_epgs = None
if l3Outs:
join_l3outs = ', '.join(l3Outs)
else:
join_l3outs = None
if not bd_vrf:
bd_vrf = None
if not unicast_gateway:
bridge_domain = 0
# Return to user input
return bridge_domain, uni_route, scope, unkwn_uni, tenant, join_aps, join_epgs, join_l3outs, bd_vrf, iplearn, mtu, learn_limit, mac, gateways | 7c2f841e9fd3822c03f8b4ea38581bcaba1b60d2 | 8,560 |
import torch
def hamming_dist(y_true, y_pred):
"""
Calculate the Hamming distance between a given predicted label and the
true label. Assumes inputs are torch Variables!
Args:
y_true (autograd.Variable): The true label
y_pred (autograd.Variable): The predicted label
Returns:
(float): The Hamming distance between the two vectors
"""
# Make sure y_pred is rounded to 0/1
y_pred = torch.round(y_pred)
result = torch.mean(torch.abs(y_true - y_pred), dim=1)
result = torch.mean(result, dim=0)
return float(result.data.cpu().numpy()) | 0edda102820626b824861ac0f05d4d77f5def432 | 8,561 |
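A small usage sketch with multi-label targets (illustrative values).

import torch

y_true = torch.tensor([[1., 0., 1., 0.],
                       [0., 1., 1., 1.]])
y_pred = torch.tensor([[0.9, 0.2, 0.4, 0.1],   # rounds to 1,0,0,0 -> 1 mismatch
                       [0.1, 0.8, 0.6, 0.2]])  # rounds to 0,1,1,0 -> 1 mismatch
print(hamming_dist(y_true, y_pred))            # 0.25 (1 of 4 labels per row)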
from typing import Tuple
def tuple_action_to_int(
action: Tuple[int, int], slot_based: bool, end_trial_action: bool
) -> int:
"""Converts tuple action to integer."""
stone, potion = action
num_special_actions = 2 if end_trial_action else 1
if stone < 0:
return stone + num_special_actions
if slot_based:
potions_and_cauldron = MAX_POTIONS + 1
else:
potions_and_cauldron = PerceivedPotion.num_types + 1
return stone * potions_and_cauldron + potion + 1 + num_special_actions | d1f616706910822670b0d14d6a19f3f9dbddf145 | 8,563 |
def warpImage(imIn, pointsIn, pointsOut, delaunayTri):
"""
    Warp an image triangle-by-triangle (piecewise affine)
    Parameters
    ===========
    imIn: input image
    pointsIn: source landmark points
    pointsOut: destination landmark points
    delaunayTri: Delaunay triangle vertex indices
    Returns
    ============
    imOut: the warped image
    """
h, w, ch = imIn.shape
imOut = np.zeros(imIn.shape, dtype=imIn.dtype)
for j in range(0, len(delaunayTri)):
tin = []
tout = []
for k in range(0, 3):
pIn = pointsIn[delaunayTri[j][k]]
pIn = constrainPoint(pIn, w, h)
pOut = pointsOut[delaunayTri[j][k]]
pOut = constrainPoint(pOut, w, h)
tin.append(pIn)
tout.append(pOut)
warpTriangle(imIn, imOut, tin, tout)
return imOut | f672cf4e6cad968c6f42747f128b436e9b00c466 | 8,565 |
import re
def rmchars(value):
"""Remove special characters from alphanumeric values except for period (.)
and negative (-) characters.
:param value: Alphanumeric value
:type value: string
:returns: Alphanumeric value stripped of any special characters
:rtype: string
>>> import utils
>>> utils.rmchars(value = "*6.5_")
'6.5'
>>> utils.rmchars(value = "ICE")
'ICE'
>>> utils.rmchars(value = "-4.2")
'-4.2'
>>> utils.rmchars(value = "%&!@#8.32&#*;")
'8.32'
"""
value = re.sub("[^A-Za-z0-9.-]+", "", value)
return value | 63428103f7da4184c6d9f33a9d05b02ce17f2448 | 8,566 |
def ema(x):
"""
    [Definition] Exponentially weighted moving average of x with the given period
    [Category] Technical indicator
"""
return 'ema(%s,%s)' %(x, pe.gen_param('ema', 'period')) | d5490340520f57c9083ae82d6fd1cadd2fc92208 | 8,567 |
from typing import Set
def tokenized(phrase: str) -> Set[str]:
"""Split a phrase into tokens and remove stopwords."""
return set(normalize(phrase).split()) - STOPWORDS | 3a01f5ea316de0f5b27506d1ff7f2358273616a2 | 8,568 |
async def server_error(request, exc):
"""
Return an HTTP 500 page.
"""
template = '500.html'
context = {'request': request}
return templates.TemplateResponse(template, context, status_code=500) | a11be57885b0f0f9107b190bafdebc6f13908f84 | 8,570 |
def return_post():
""""
Returns the post-processing plugins.
:param: None
:return: POST_PROCESSING_PLUGINS
"""
return POST_PROCESSING_PLUGINS | 9c7469f8ec336217abdfdb46db8a0c511789a4bf | 8,571 |
import base64
def numpy_to_b64str(img):
"""
Converts a numpy array into a base 64 string
Args:
img (np.array):
Returns:
str: base 64 representation of the numpy array/image.
"""
img = img[..., ::-1] # flip for cv conversion
_, img = cv2.imencode('.jpg', img) # strips header
image_base64 = base64.b64encode(img)
base64_string = image_base64.decode('utf-8') # convert to string
return base64_string | a6af378a26dd3adac08568f49a5d8d74954feddc | 8,573 |
def lennard_jones(r, epsilon, sigma, index=(12, 6)):
"""
General pair potential resembling a Lennard Jones model. Default indexes
values are for a typical LJ potential, also called 12-6 potential.
Parameters
----------
r : float or np.ndarray
Distance between interacting particles. It can be a float or a numpy
arrays containing a set of particle-particle distances.
epsilon : float
Dispersion energy, i.e. depth of the potential well.
sigma : float
Distance at which the potential energy is zero.
index : tuple, optional
Power indexes for repulsive and attractive terms. The default is (12, 6).
Returns
-------
float or np.ndarray
Potential energies at the corresponding distances.
"""
    sig_r = sigma/r
    # ** (rather than math.pow) keeps this valid for both scalars and numpy arrays
    return 4*epsilon*(sig_r**index[0] - sig_r**index[1]) | c16856d1960f1b2542305e4048d8e9fe5e866210 | 8,574 |
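A quick check against the two textbook reference points of the 12-6 potential: the energy is zero at r = sigma and reaches its minimum of -epsilon at r = 2**(1/6) * sigma.

eps, sig = 0.5, 3.4
print(lennard_jones(sig, eps, sig))                 # 0.0
print(lennard_jones(2 ** (1 / 6) * sig, eps, sig))  # -0.5 == -eps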
def get_unique_name(x, mult=0, extra=''):
"""
Returns a unique key composed of inchikey and multiplicity
>>> mol = get_mol('[O][O]')
>>> get_unique_name(mol)
'MYMOFIZGZYHOMD-UHFFFAOYSA-N3'
"""
mol = get_mol(x, make3D=True)
if mult == 0:
mult = mol.spin
return mol.write("inchikey").strip() + str(mult) + extra | a9a58078fb2af1c0542dcf77f522154dd2c3a374 | 8,575 |
def get_individual_user(user_id: int) -> JSONResponse:
"""
Lists all information belonging to one user.
:param user_id: the id of the user
:return: status code and response data
"""
user = _get_db()["users"].find_one({"user_id": user_id})
return JSONResponse(status_code=status.HTTP_200_OK, content=dumps(user)) | dfa8d5cdfa8dd8363c550c79d18924a0b5a5764b | 8,576 |
from typing import Union
from typing import List
from typing import Optional
from typing import Tuple
def portfolio_averages(
df: pd.DataFrame,
groupvar: str,
avgvars: Union[str, List[str]],
ngroups: int = 10,
byvars: Optional[Union[str, List[str]]] = None,
cutdf: pd.DataFrame = None,
wtvar: Optional[str] = None,
count: Union[str, bool] = False,
portvar: str = "portfolio",
avgonly: bool = False,
) -> Union[pd.DataFrame, Tuple[pd.DataFrame, pd.DataFrame]]:
"""
Creates portfolios and calculates equal- and value-weighted averages of variables within portfolios.
If ngroups=10,
then will form 10 portfolios, with portfolio 1 having the bottom 10 percentile of groupvar, and portfolio 10 having
the top 10 percentile of groupvar.
:Notes:
Resets index and drops in output data, so don't use if index is important (input data not affected)
:param df: input data
:param groupvar: name of variable in df to form portfolios on
:param avgvars: variables to be averaged
:param ngroups: number of portfolios to form
:param byvars: name of variable(s) in df, finds portfolios within byvars. For example if byvars='Month',
would take each month and form portfolios based on the percentiles of the groupvar during only that month
:param cutdf: optionally determine percentiles using another dataset
:param wtvar: name of variable in df to use for weighting in weighted average
:param count: pass variable name to get count of non-missing of that variable within groups.
:param portvar: name of portfolio variable in the output dataset
:param avgonly: True to return only averages, False to return (averages, individual observations with portfolios)
:return:
"""
ports = portfolio(
df, groupvar, ngroups=ngroups, byvars=byvars, cutdf=cutdf, portvar=portvar
)
if byvars:
assert isinstance(byvars, (str, list))
if isinstance(byvars, str):
byvars = [byvars]
by = [portvar] + byvars
avgs = averages(ports, avgvars, byvars=by, wtvar=wtvar, count=count)
else:
avgs = averages(ports, avgvars, byvars=portvar, wtvar=wtvar, count=count)
if avgonly:
return avgs
else:
return avgs, ports | 23c902aafd341a7bbd8e6fc8b005e3cdb5a10f82 | 8,577 |
def get_zero_crossing_rate(y, get_mean=True):
"""
Compute the Zero Crossing Rate (ZCR)
:param y: np.ndarray [shape=(n,)]
Sampling rate of y
:param get_mean: bool
Whether to instead return the mean of ZCR over all frames
:return: np.ndarray [shape=(1,t)] or float
ZCR for each frame, or the mean ZCR
"""
zcrs = librosa.feature.zero_crossing_rate(y=y)
if get_mean:
return zcrs.mean()
else:
return zcrs | 782cd302acc69065d26837e45fb882714fa6b927 | 8,579 |
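A short usage sketch on a synthetic tone; librosa's default frame settings apply.

import numpy as np

sr = 22050
t = np.linspace(0, 1.0, sr, endpoint=False)
y = 0.5 * np.sin(2 * np.pi * 440.0 * t)                  # 1 s of a 440 Hz sine
print(get_zero_crossing_rate(y))                         # mean ZCR over frames
print(get_zero_crossing_rate(y, get_mean=False).shape)   # (1, n_frames)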
import math
def UF9(x):
"""
adapted from
https://github.com/Project-Platypus/Platypus/blob/master/platypus/problems.py
"""
nvars = len(x)
count1 = 0
count2 = 0
count3 = 0
sum1 = 0.0
sum2 = 0.0
sum3 = 0.0
E = 0.1
for j in range(3, nvars+1):
yj = x[j-1] - 2.0*x[1]*math.sin(2.0*math.pi*x[0] + j*math.pi/nvars)
if j % 3 == 1:
sum1 += yj**2
count1 += 1
elif j % 3 == 2:
sum2 += yj**2
count2 += 1
else:
sum3 += yj**2
count3 += 1
yj = (1.0 + E) * (1.0 - 4.0*(2.0*x[0] - 1.0)**2)
yj = max(yj, 0.0)
f1 = 0.5*(yj + 2.0*x[0])*x[1] + 2.0*sum1/count1
f2 = 0.5*(yj - 2.0*x[0] + 2.0)*x[1] + 2.0*sum2/count2
f3 = 1.0 - x[1] + 2.0*sum3/count3
return np.array([f1, f2, f3]) | 577b36653517e09cef764528920773ea51c5ed60 | 8,581 |
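A hedged usage sketch evaluating the three UF9 objectives at a random point; the CEC'09 benchmark defines its own variable bounds, so the uniform [0, 1] values below are only for illustration.

import numpy as np

rng = np.random.default_rng(0)
x = rng.random(30)                 # 30 decision variables
f1, f2, f3 = UF9(x)
print(f1, f2, f3)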
def antiderivate(values, ax_val, index, Nper, is_aper, is_phys, is_freqs):
"""Returns the anti-derivate of values along given axis
values is assumed to be periodic and axis is assumed to be a linspace
Parameters
----------
values: ndarray
array to derivate
ax_val: ndarray
axis values
index: int
index of axis along which to derivate
Nper: int
number of periods to replicate
is_aper: bool
True if values is anti-periodic along axis
is_phys: bool
True if physical quantity (time/angle/z)
is_freqs: bool
True if frequency axis
Returns
-------
values: ndarray
anti-derivate of values
"""
if is_freqs:
dim_array = np.ones((1, values.ndim), int).ravel()
dim_array[index] = -1
axis_reshaped = ax_val.reshape(dim_array)
values = values / (axis_reshaped * 2 * 1j * np.pi)
elif is_phys:
if ax_val.size > 1:
# Swap axis to always have integration axis on 1st position
values = np.swapaxes(values, index, 0)
if Nper is None:
# Taking input values
values_full = values
ax_full = ax_val
else:
# Add last point to axis
ax_full = np.concatenate(
(
ax_val,
np.array([ax_val[-1] + ax_val[1] - ax_val[0]]),
)
)
# Get values on a full (anti-)period
shape = list(values.shape)
shape[0] = shape[0] + 1
values_full = np.zeros(shape, dtype=values.dtype)
values_full[:-1, ...] = values
# Add first sample at the end of values to integrate on last interval
# Last value is the same as (respectively the opposite of) the first value
# in case of periodicity (respectively anti-periodicity)
values_full[-1, ...] = (-1) ** int(is_aper) * values[0, ...]
# Anti-derivate along axis
values = np.roll(
scp_int.cumulative_trapezoid(values_full, x=ax_full, axis=0),
shift=1,
axis=0,
)
# Integration constant is given by removing average value
values = values - np.mean(values, axis=0)
# Get N first values and swap axes back to origin
values = np.swapaxes(values, 0, index)
else:
raise Exception("Cannot anti-derivate along axis if axis size is 1")
else:
raise AxisError("Derivation only available for time/angle/z/freqs")
return values | 9280187e907e16f1b2b00a1e86acd43538adcbe4 | 8,583 |
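A minimal usage sketch, assuming antiderivate is importable and its module imports scipy.integrate as scp_int; integrating cos over one periodic time axis should recover sin up to the removed mean:
import numpy as np
N = 1000
t = np.arange(N) / N                       # one period, last point excluded
values = np.cos(2 * np.pi * t)
prim = antiderivate(values, t, index=0, Nper=1, is_aper=False,
                    is_phys=True, is_freqs=False)
expected = np.sin(2 * np.pi * t) / (2 * np.pi)
print(np.abs(prim - expected).max())       # small discretization error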
def renumber_labels(label_img):
""" Re-number nuclei in a labeled image so the nuclei numbers are unique and consecutive.
"""
new_label = 0
for old_label in np.unique(label_img):
if not old_label == new_label:
label_img[label_img == old_label] = new_label
new_label += 1
return label_img | 4a37f151ba5a4e3066ce3656903b587f38deafea | 8,584 |
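A minimal usage sketch (labels are modified in place and returned); the label values are illustrative:
import numpy as np
labels = np.array([[0, 0, 5],
                   [5, 9, 9]])
print(renumber_labels(labels))
# [[0 0 1]
#  [1 2 2]]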
def __virtual__():
"""
Return virtual name of the module.
:return: The virtual name of the module.
"""
return __virtualname__ | 3f1a19fab2561ae1fb464d76a13e7a0b75af5c93 | 8,587 |
def getsamplev3(qcode):
"""Get a sample object of a given identifier
in API V3 style
Returns: A sample (v3) object
"""
scrit = SampleSearchCriteria()
scrit.withCode().thatEquals(qcode)
fetch_opt = SampleFetchOptions()
fetch_opt.withProperties()
fetch_opt.withSpace()
result = api.searchSamples(sessionToken, scrit, fetch_opt)
samples = []
for sample in result.getObjects():
samples.append(sample)
if len(samples) > 1:
raise mtbutils.MTBdropboxerror('More than one sample found with identifier {}'.format(qcode))
return samples[0] | 513de42ffd13f6b9abe74753e568e8db2fa473e3 | 8,588 |
def k892_distribution(mass):
"""Calculate normalized relativistic Breit-Wigner distribution value for K(892) at given mass"""
if k892_distribution.norm is None:
k892_distribution.norm = _norm(_k892_distribution_unnormalized)
return _k892_distribution_unnormalized(mass) / k892_distribution.norm | 38175808a7f9acf178604bf64935f0beeb3f7631 | 8,589 |
def ProcessMoleculesUsingSingleProcess(Mols, PAINSPatternMols, Writer, WriterFiltered):
"""Process and filter molecules using a single process."""
NegateMatch = OptionsInfo["NegateMatch"]
OutfileFilteredMode = OptionsInfo["OutfileFilteredMode"]
Compute2DCoords = OptionsInfo["OutfileParams"]["Compute2DCoords"]
SetSMILESMolProps = OptionsInfo["OutfileParams"]["SetSMILESMolProps"]
MiscUtil.PrintInfo("\nFiltering molecules...")
(MolCount, ValidMolCount, RemainingMolCount) = [0] * 3
FirstMol = True
for Mol in Mols:
MolCount += 1
if Mol is None:
continue
if RDKitUtil.IsMolEmpty(Mol):
MolName = RDKitUtil.GetMolName(Mol, MolCount)
MiscUtil.PrintWarning("Ignoring empty molecule: %s" % MolName)
continue
ValidMolCount += 1
if FirstMol:
FirstMol = False
if SetSMILESMolProps:
if Writer is not None:
RDKitUtil.SetWriterMolProps(Writer, Mol)
if WriterFiltered is not None:
RDKitUtil.SetWriterMolProps(WriterFiltered, Mol)
MolMatched = DoesMoleculeContainsPAINSPattern(Mol, PAINSPatternMols)
if MolMatched == NegateMatch:
RemainingMolCount += 1
WriteMolecule(Writer, Mol, Compute2DCoords)
else:
if OutfileFilteredMode:
WriteMolecule(WriterFiltered, Mol, Compute2DCoords)
return (MolCount, ValidMolCount, RemainingMolCount) | fe81953ce311724005c27ea309aa238578c4fd1c | 8,590 |
def UDiv(a: BitVec, b: BitVec) -> BitVec:
"""Create an unsigned division expression.
:param a:
:param b:
:return:
"""
return _arithmetic_helper(a, b, z3.UDiv) | fb3e300a96afdbf17fa7e6fff02379790b2dfd02 | 8,591 |
def _pressure_level_widths(tro3_cube, ps_cube, top_limit=0.0):
"""Create a cube with pressure level widths.
This is done by taking a 2D surface pressure field as lower bound.
Parameters
----------
tro3_cube : iris.cube.Cube
`Cube` containing `mole_fraction_of_ozone_in_air`.
ps_cube : iris.cube.Cube
`Cube` containing `surface_air_pressure`.
top_limit : float
Pressure in Pa.
Returns
-------
iris.cube.Cube
`Cube` of same shape as `tro3_cube` containing pressure level widths.
"""
pressure_array = _create_pressure_array(tro3_cube, ps_cube, top_limit)
data = _apply_pressure_level_widths(pressure_array)
p_level_widths_cube = tro3_cube.copy(data=data)
p_level_widths_cube.rename('pressure level widths')
p_level_widths_cube.units = ps_cube.units
return p_level_widths_cube | 53dd14f6e0b1fda249ecd10d0ad30cfb4e076d5a | 8,592 |
def load_model_configurations(sender):
"""
Iterates through setting MODELS_CRUD_EVENT searching for the sender
model configurations.
:param sender: Django Model
:return dict
"""
for model_config in settings.MODELS_CRUD_EVENT:
model = model_config['model']
app, model = model.rsplit('.', 1)
model = apps.get_app_config(app).get_model(model)
if sender == model:
return model_config
return None | e32d441de47f9bb1a78f93854e1c0436819c148b | 8,593 |
from typing import Optional
def get_user_by_private_or_public_nickname(nickname: str) -> Optional[User]:
"""
Gets the user by his (public) nickname, based on the option, whether his nickname is public or not
:param nickname: Nickname of the user
:return: Current user or None
"""
user: User = get_user_by_case_insensitive_nickname(nickname)
public_user: User = get_user_by_case_insensitive_public_nickname(nickname)
if not user or not public_user:
return None
settings: Settings = user.settings
if not settings:
return None
if settings.should_show_public_nickname and user:
return user
elif not settings.should_show_public_nickname and public_user:
return public_user
return None | 1dc43337c8e1372a32ed471ef8285544107cd22b | 8,594 |
def expose(window, context, name, monitor):
"""REST HTTP/HTTPS API to view tuples from a window on a stream.
Embeds a Jetty web server to provide HTTP REST access to the collection of tuples in `window` at the time of the last eviction for tumbling windows, or last trigger for sliding windows.
Example with a sliding window::
import streamsx.endpoint as endpoint
s = topo.source([{'a': 'Hello'}, {'a': 'World'}, {'a': '!'}]).as_json()
endpoint.expose(window=s.last(3).trigger(1), context='sample', name='view', monitor='endpoint-out')
The URL containing "**context**/**name**" for the sample above ends with: ``/sample/view/tuples``
**URL mapping**
The URL contains the following parts:
``https://<base-url>/<prefix>/<context>/<name>/<postfix>``
For a web-server in a job its URLs are exposed with **prefix** path:
* jobname/ - When a job name was explictly set. Job names should be simple mapping to a single path element.
* streams/jobs/jobid/ - When a job name was not explicitly set.
Example URLs within the cluster for application-name of "em" in project "myproject" are
* with a web-server in job named "transit" with context "sample" and name "view":
``https://em.myproject.svc:8443/transit/sample/view/tuples``
* with a web-server in job 7:
``https://em.myproject.svc:8443/streams/jobs/7/sample/view/tuples``
* retrieve information for job named "transit" with context "sample" and name "view":
``https://em.myproject.svc:8443/transit/sample/view/ports/info``
Args:
window(Window): Windowed stream of tuples that will be viewable using a HTTP GET request.
context(str): Defines an URL context path. URL contains ``context``/``name``.
name(str): Sink name in the Streams context. This name is part of the URL.
monitor(str): The name of the endpoint-monitor that provides the ssl configuration for this endpoint. If it is None, the connection uses plain HTTP
Returns:
streamsx.topology.topology.Sink: Stream termination.
"""
_add_toolkit_dependency(window.topology, '[4.3.0,5.0.0)')
sslAppConfigName = None
if monitor is not None:
sslAppConfigName = monitor + '-streams-certs'
_op = _HTTPTupleView(window, context=context, name=name, sslAppConfigName=sslAppConfigName)
return streamsx.topology.topology.Sink(_op) | ca3cf81c91ee89210da6989fdecce727d44273a1 | 8,595 |
import re
def get_order_args():
"""
Get order arguments, return a dictionary
{ <VIEW_NAME>: (ORDER_COL, ORDER_DIRECTION) }
Arguments are passed like: _oc_<VIEW_NAME>=<COL_NAME>&_od_<VIEW_NAME>='asc'|'desc'
"""
orders = {}
for arg in request.args:
re_match = re.findall('_oc_(.*)', arg)
if re_match:
order_direction = request.args.get('_od_' + re_match[0])
if order_direction in ('asc', 'desc'):
orders[re_match[0]] = (request.args.get(arg), order_direction)
return orders | a5e57f95479e15c8167434ff34c51cc80fc43f45 | 8,596 |
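A usage sketch inside a Flask test request context, assuming get_order_args is importable and its module does `from flask import request`; the view and column names are hypothetical:
from flask import Flask
app = Flask(__name__)
with app.test_request_context("/users?_oc_UserView=name&_od_UserView=asc"):
    print(get_order_args())   # {'UserView': ('name', 'asc')}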
def version_info(): # pragma: no cover
"""
Get version of nameko_kafka package as tuple
"""
return tuple(map(int, __version__.split('.'))) | 8fe39c50a43e40a589abb51f56e2c7c503026712 | 8,597 |
def StrokePathCommandAddCapType(builder, capType):
"""This method is deprecated. Please switch to AddCapType."""
return AddCapType(builder, capType) | 4e7f852cde4993994ab5f7cf3e1b57700eaff7d3 | 8,598 |
def process_images(dummy_request):
"""Downloads and processes all images uploaded before resize logic fix deployment"""
global n_global_resized
media_bucket = storage_client.bucket(MEDIA_BUCKET)
process_global_images(db_pool, media_bucket)
process_user_images(db_pool, media_bucket)
return f"Done! \n\n resized, replaced: \nGlobal: {n_global_resized}\n User: {n_user_resized}" | ea3734ce797305f7305880b02d2696c3ca8a21c7 | 8,599 |
def _filename_pattern(ext):
"""Returns an re matching native or tfrecord files of format `ext`."""
return r".*\.{}(\.tfrecord)?(\.gz)?".format(ext) | 6ec5a86dbba2432293451ca7dff0a0d1d5091bf0 | 8,600 |
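A minimal sketch showing which filenames the returned pattern accepts, assuming _filename_pattern is importable; the extension and filenames are illustrative:
import re
pattern = _filename_pattern("sstable")
print(bool(re.fullmatch(pattern, "shard-00000.sstable")))              # True
print(bool(re.fullmatch(pattern, "shard-00000.sstable.tfrecord.gz")))  # True
print(bool(re.fullmatch(pattern, "shard-00000.txt")))                  # False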
def assemble_remote_url():
"""
    Assemble the target server URL, i.e. generate the value of parse.remote_url
:rtype: str
"""
if parse.is_external_domain:
        # the request targets an external domain
scheme = 'https://' if parse.is_https else 'http://'
return urljoin(scheme + parse.remote_domain, parse.remote_path_query)
else:
        # the request targets the main domain, or a domain treated as an alias of it
return urljoin(target_scheme + target_domain, parse.remote_path_query) | f0e14ddb42636f12f4fafa31af4a87b3f91a4e05 | 8,601 |
def register_blueprints(app):
"""Register Flask blueprints."""
app.register_blueprint(public.views.blueprint)
app.register_blueprint(drawbot.views.blueprint)
app.register_blueprint(user.views.blueprint)
return None | 936c17a95ddc013ec9f0c6c232a689245fc313d0 | 8,602 |
def write_attribute(xml_elem, elem: str=None, attrib: str=None, txt: str=None):
""" Write new text to a xml attribute.
Elem can be used to refer to a subelement of the current xml_elem
Args:
xml_elem: The current xml element
elem (str): The requested element tag name
attrib (str): The attribute name
txt (str): The new text for the element
Returns:
xml_elem: The modified xml element
"""
if xml_elem is not None:
if elem is not None:
xml_elem = try_get_single_element_from_xml(elem=elem, xml_elem=xml_elem)
if xml_elem is not None:
xml_elem.set(attrib, txt)
return xml_elem | d2ee296b6926a71ef2ffaf9fd9d47128f66e8806 | 8,603 |
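A minimal usage sketch taking the elem=None path (no subelement lookup), assuming write_attribute is importable; the element and attribute names are illustrative:
import xml.etree.ElementTree as ET
root = ET.fromstring("<record/>")
write_attribute(root, attrib="status", txt="active")
print(ET.tostring(root, encoding="unicode"))   # <record status="active" />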
def _ndarray_feature(x: np.ndarray) -> tf.train.Feature:
"""Create an ndarray feature stored as bytes."""
x_bytes = x.tostring()
feature = tf.train.Feature(bytes_list=tf.train.BytesList(value=[x_bytes]))
return feature | 03ad22f7d943d24574c92a494c915c28611a8d12 | 8,604 |
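A usage sketch round-tripping an array through a tf.train.Example, assuming _ndarray_feature is importable and TensorFlow is installed; the feature key is illustrative:
import numpy as np
import tensorflow as tf
arr = np.arange(6, dtype=np.float32).reshape(2, 3)
example = tf.train.Example(
    features=tf.train.Features(feature={"x": _ndarray_feature(arr)}))
raw = example.features.feature["x"].bytes_list.value[0]
decoded = np.frombuffer(raw, dtype=np.float32).reshape(2, 3)
print(np.array_equal(arr, decoded))   # True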
import re
def get_img_compliance_level(profile):
""" Try to figure out the IIIF Image API compliance level given the
`profile` value from a info.json.
"""
    patt_iiif = re.compile(r'level([0-2])\.json$')
patt_stan = re.compile('#level([0-2])$')
def get_from_str(s):
m = None
if 'http://iiif.io/api/image/2/' in s:
m = patt_iiif.search(s)
elif 'http://library.stanford.edu/iiif/image-api/' in s:
m = patt_stan.search(s)
if m:
return int(m.group(1))
return -1
lvl = -1
if type(profile) == str:
lvl = get_from_str(profile)
elif type(profile) == list:
for p in [x for x in profile if type(x) == str]:
found = get_from_str(p)
if found != -1:
lvl = found
break
if lvl == -1:
log('Could not find compliance level in info.json.')
return lvl | 7970a795ea1b79bfea3df0e5a306e2d0286a61de | 8,605 |
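A usage sketch with typical profile values from an info.json, assuming get_img_compliance_level is importable; the profile contents are illustrative:
profile = ["http://iiif.io/api/image/2/level2.json",
           {"formats": ["gif", "pdf"], "qualities": ["color", "gray"]}]
print(get_img_compliance_level(profile))   # 2
print(get_img_compliance_level(
    "http://library.stanford.edu/iiif/image-api/compliance.html#level1"))  # 1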
def _extract_protocol_layers(deserialized_data):
"""
Removes unnecessary values from packets dictionaries.
:param deserialized_data: Deserialized data from tshark.
:return: List of filtered packets in dictionary format.
"""
packets_filtered = []
for packet in deserialized_data:
packets_filtered.append(packet["_source"]["layers"])
return packets_filtered | 3c3a899909c5278b29ffb402ccb4d8dde24fce3a | 8,606 |
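A minimal sketch with the JSON structure produced by `tshark -T json`, assuming _extract_protocol_layers is importable; the packet contents are illustrative:
capture = [
    {"_source": {"layers": {"ip": {"ip.src": "10.0.0.1"},
                            "tcp": {"tcp.dstport": "443"}}}},
    {"_source": {"layers": {"ip": {"ip.src": "10.0.0.2"},
                            "udp": {"udp.dstport": "53"}}}},
]
print(_extract_protocol_layers(capture))
# [{'ip': {...}, 'tcp': {...}}, {'ip': {...}, 'udp': {...}}]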
from typing import Optional
from operator import gt
def calculate_affinity(
adata: AnnData,
level: int = 1,
block_key: Optional[str] = 'nsbm',
group_by: Optional[str] = None,
state: Optional = None,
neighbors_key: Optional[str] = 'neighbors',
adjacency: Optional[sparse.spmatrix] = None,
directed: bool = False,
use_weights: bool = False,
obsp: Optional[str] = None,
back_prob: bool = False,
copy: bool = False
) -> Optional[AnnData]:
"""\
Calculate cell affinity given a partition scheme. It can be used for
partitions calculated using schist or for any partition scheme, given
for example by cell annotations.
Parameters
----------
adata:
The AnnData object. Should have been already processed with schist
level:
The level to calculate affinity. This parameter is effective
only for Nested partitions
block_key:
The prefix for partitions. This parameter is ignored if the state
is not gt.NestedBlockState
group_by:
The key for group names used for calculations. Setting this will override
level and block_key. This is effective only for NestedBlockState partitions
state:
Optionally calculate affinities on this state.
neighbors_key
Use neighbors connectivities as adjacency.
If not specified, leiden looks .obsp['connectivities'] for connectivities
(default storage place for pp.neighbors).
If specified, leiden looks
.obsp[.uns[neighbors_key]['connectivities_key']] for connectivities.
adjacency
Sparse adjacency matrix of the graph, defaults to neighbors connectivities.
directed
Whether to treat the graph as directed or undirected.
use_weights
If `True`, edge weights from the graph are used in the computation
(placing more emphasis on stronger edges).
copy:
Return a new object or do everything in place
Returns
-------
Depending on `copy`, returns or updates `adata` with affinity values
in adata.obsm[f'CA_{block_key}_level_{level}']
"""
matrix_key = f'CA_{block_key}_level_{level}' # the default name of the matrix
if group_by:
logg.info(f'Calculating cell affinity to {group_by}')
else:
logg.info(f'Calculating cell affinity to level {level}')
if not state:
# if no state is provided, use the default to retrieve graph
if 'schist' in adata.uns and 'blocks' in adata.uns['schist'][f'{block_key}']:
params = adata.uns['schist'][f'{block_key}']['params']
if 'neighbors_key' in params:
neighbors_key=params['neighbors_key']
if 'use_weights' in params:
use_weights=params['use_weights']
if 'deg_corr' in params:
deg_corr=params['deg_corr']
state = state_from_blocks(adata,
state_key=block_key,
neighbors_key=neighbors_key,
adjacency=adjacency,
directed=directed,
use_weights=use_weights,
deg_corr=deg_corr
)
g = state.g
elif not neighbors_key:
# no state and no adjacency provided, raise an error
raise ValueError("A state or an adjacency matrix should be given"
"Otherwise a graph cannot be computed")
else:
# get the graph from the adjacency
adjacency = _choose_graph(adata, obsp, neighbors_key)
g = get_igraph_from_adjacency(adjacency, directed=directed)
g = g.to_graph_tool()
gt.remove_parallel_edges(g)
state = gt.BlockState(g)
else:
g = state.g
if group_by:
matrix_key = f'CA_{group_by}'
# if groups are given, we generate a new BlockState and work on that
if group_by in adata.obs.columns and adata.obs[group_by].dtype.name == 'category':
partitions = adata.obs[group_by].cat.codes.values
state = gt.BlockState(g, b=partitions)
if back_prob:
ca_matrix = get_cell_back_p(state)
else:
ca_matrix = get_cell_loglikelihood(state, as_prob=True)
else:
raise ValueError(f"{group_by} should be a categorical entry in adata.obs")
else:
# use precomputed blocks and states
if type(state) == gt.NestedBlockState:
if back_prob:
p0 = get_cell_back_p(state, level=0)
else:
p0 = get_cell_loglikelihood(state, level=0, as_prob=True)
group_col = None
if group_by and group_by in adata.obs.columns:
group_col = group_by
else:
g_name = f'{block_key}_level_{level}'
if g_name in adata.obs.columns:
group_col = g_name
if not group_col:
raise ValueError("The provided groups or level/blocks do not exist")
g0 = pd.Categorical(state.project_partition(0, 0).a)
cross_tab = pd.crosstab(g0, adata.obs[group_col], normalize='index')
ca_matrix = (p0 @ cross_tab).values
elif type(state) == gt.PPBlockState:
if back_prob:
ca_matrix = get_cell_back_p(state)
else:
ca_matrix = get_cell_loglikelihood(state, as_prob=True)
matrix_key = 'CA_ppbm'
adata.obsm[matrix_key] = ca_matrix
return adata if copy else None | e2eec0e9f45199d6cc1559d71dfbf629dba61621 | 8,607 |
def numpy_dtypes_for_minmax(request):
"""
Fixture of numpy dtypes with min and max values used for testing
cummin and cummax
"""
dtype = request.param
min_val = (
np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min
)
max_val = (
np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max
)
return (dtype, min_val, max_val) | d2ad3676549f427c134b38106a93145c54114052 | 8,609 |
def solve(topics):
"""Solve."""
a_words, b_words = get_dicts(topics)
candidates = []
original = []
duplicates = []
for a, b in topics:
# print(a, b)
# print(a_words[a], b_words[b])
if not (a_words[a] == 1 or b_words[b] == 1):
candidates.append((a, b))
else:
original.append((a, b))
a_words_org, b_words_org = get_dicts(original)
while len(candidates) > 0:
l_candidates = []
for a, b in candidates:
if a_words_org[a] >= 1 and b_words_org[b] >= 1:
duplicates.append((a, b))
else:
l_candidates.append((a, b))
candidates = l_candidates[:]
# print(candidates)
return len(candidates) | bb78da10ff6bb939bc0de9e0cc51a036c2a0e8b9 | 8,610 |
def get_package_plugin(package_type):
"""
Get a plugin for a specific package
Parameters
----------
package_type: str
The package type to fetch
Returns
-------
InvirtualEnvPlugin:
The invirtualenv plugin for the specific package_type
"""
for plugin in installed_plugins():
if package_type in plugin.package_formats:
return plugin | a1d97a6d1c4248f7a1bfeade8e734bcc0af3aceb | 8,611 |
def validate_basic_message(msg):
"""Validate basic messages.
This example just uses basic assertions but you could easily use a schema
library to get more sophisticated validators.
"""
assert msg.type == TYPE
assert "~l10n" in msg
assert "sent_time" in msg
assert "content" in msg
return msg | df6b1541adf86a295e6592f26d72ab2109617f6b | 8,613 |
def _filter_event_queryset(queryset, params, srs=None):
"""
Filter events queryset by params
(e.g. self.request.query_params in EventViewSet)
"""
# Filter by string (case insensitive). This searches from all fields
# which are marked translatable in translation.py
val = params.get('text', None)
if val:
val = val.lower()
# Free string search from all translated fields
fields = EventTranslationOptions.fields
# and these languages
languages = [x[0] for x in settings.LANGUAGES]
qset = Q()
for field in fields:
for lang in languages:
kwarg = {field + '_' + lang + '__icontains': val}
qset |= Q(**kwarg)
queryset = queryset.filter(qset)
val = params.get('last_modified_since', None)
# This should be in format which dateutil.parser recognizes, e.g.
# 2014-10-29T12:00:00Z == 2014-10-29T12:00:00+0000 (UTC time)
# or 2014-10-29T12:00:00+0200 (local time)
if val:
dt = parse_time(val, is_start=False)
queryset = queryset.filter(Q(last_modified_time__gte=dt))
val = params.get('start', None)
if val:
dt = parse_time(val, is_start=True)
queryset = queryset.filter(Q(end_time__gt=dt) | Q(start_time__gte=dt))
val = params.get('end', None)
if val:
dt = parse_time(val, is_start=False)
queryset = queryset.filter(Q(end_time__lt=dt) | Q(start_time__lte=dt))
val = params.get('bbox', None)
if val:
bbox_filter = build_bbox_filter(srs, val, 'position')
places = Place.geo_objects.filter(**bbox_filter)
queryset = queryset.filter(location__in=places)
# Filter by data source, multiple sources separated by comma
val = params.get('data_source', None)
if val:
val = val.split(',')
queryset = queryset.filter(data_source_id__in=val)
# Negative filter by data source, multiple sources separated by comma
val = params.get('data_source!', None)
if val:
val = val.split(',')
queryset = queryset.exclude(data_source_id__in=val)
# Filter by location id, multiple ids separated by comma
val = params.get('location', None)
if val:
val = val.split(',')
queryset = queryset.filter(location_id__in=val)
# Filter by keyword id, multiple ids separated by comma
val = params.get('keyword', None)
if val:
val = val.split(',')
queryset = queryset.filter(keywords__pk__in=val)
# Filter only super or sub events if recurring has value
val = params.get('recurring', None)
if val:
val = val.lower()
if val == 'super':
queryset = queryset.filter(is_recurring_super=True)
elif val == 'sub':
queryset = queryset.filter(is_recurring_super=False)
val = params.get('max_duration', None)
if val:
dur = parse_duration_string(val)
cond = 'end_time - start_time <= %s :: interval'
queryset = queryset.extra(where=[cond], params=[str(dur)])
val = params.get('min_duration', None)
if val:
dur = parse_duration_string(val)
cond = 'end_time - start_time >= %s :: interval'
queryset = queryset.extra(where=[cond], params=[str(dur)])
val = params.get('publisher', None)
if val:
queryset = queryset.filter(publisher__id=val)
return queryset | 35103268d301239d4c884d50ab5321ebb22ed235 | 8,614 |
import re
def process_user(enrollment, section):
"""Handle getting assignments for a single user
Args:
enrollment (canvasapi.enrollment.Enrollment): Canvas <Enrollment> object
section (canvasapi.section.Section): Canvas <Section> object
Returns:
[list]: formatted list for writing to the CSV
"""
missing = get_user_missing(section, enrollment.user["id"])
login = course.get_user(enrollment.user["id"]).login_id
regex = re.compile("@")
if regex.search(login) is None:
email = f"{login}@elkhart.k12.in.us"
else:
email = login
return [
enrollment.user["sortable_name"],
email,
section.name,
enrollment.last_activity_at,
len(missing),
", ".join(missing),
] | 88b471433d99c659eabac82a797530def3baf8f2 | 8,615 |
def op(name,
data,
bucket_count=None,
display_name=None,
description=None,
collections=None):
"""Create a histogram summary op.
Arguments:
name: A unique name for the generated summary node.
data: A `Tensor` of any shape. Must be castable to `float64`.
bucket_count: Optional positive `int`. The output will have this
many buckets, except in two edge cases. If there is no data, then
there are no buckets. If there is data but all points have the
same value, then there is one bucket whose left and right
endpoints are the same.
display_name: Optional name for this summary in TensorBoard, as a
constant `str`. Defaults to `name`.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
      `[GraphKeys.SUMMARIES]`.
Returns:
A TensorFlow summary op.
"""
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name, description=description)
with tf.name_scope(name):
tensor = _buckets(data, bucket_count=bucket_count)
return tf.summary.tensor_summary(name='histogram_summary',
tensor=tensor,
collections=collections,
summary_metadata=summary_metadata) | 368b5f0af59352e5f283566d9008c413d38692c9 | 8,616 |
def read_as_str(file):
"""
    Read the file and return its contents
"""
try:
with open(file, 'r') as f:
return f.read()
except IOError:
return "" | 934bec1e47e0f9af09be7f4d695a8ddf09004f3f | 8,617 |
def has_xml_header(filepath):
"""
Return True if the first line of the file is <?xml
:param filepath:
:return:
"""
    try:
        with open(filepath, 'r') as f:
            return f.readline().lstrip().startswith('<?xml')
    except IOError:
        return False | 21fdbdf36cf08ca18d8a0f0d7f7d2201b243c558 | 8,618
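A minimal check of the behavior described in the docstring, assuming the implementation sketched above and a hypothetical temporary file:
import os
import tempfile
fd, path = tempfile.mkstemp(suffix=".xml")
with os.fdopen(fd, "w") as f:
    f.write('<?xml version="1.0" encoding="UTF-8"?>\n<root/>\n')
print(has_xml_header(path))   # True
os.remove(path)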
def shikaku(givens):
"""Solver for Shikaku minipuzzles."""
sym = grilops.make_number_range_symbol_set(0, SIZE * SIZE - 1)
sg = grilops.SymbolGrid(LATTICE, sym)
rc = grilops.regions.RegionConstrainer(
LATTICE,
solver=sg.solver,
rectangular=True
)
shifter = Shifter(sg.solver)
for p in LATTICE.points:
sg.solver.add(sg.cell_is(p, rc.region_id_grid[p]))
given = givens[p.y][p.x]
if given > 0:
given = shifter.given(p, given)
sg.solver.add(rc.parent_grid[p] == grilops.regions.R)
sg.solver.add(rc.region_size_grid[p] == given)
else:
sg.solver.add(rc.parent_grid[p] != grilops.regions.R)
assert sg.solve()
sg.print()
print()
shifter.print_shifts()
print()
return shifter.eval_binary() | e380ce634b342a19ecd466576e8e5d3ff28ccc25 | 8,619 |
def invert_qgniw(qh,phi,phih,k,l,f0):
""" Calculate the streamfunction given the potential vorticity.
The algorithm is:
1) Calculate wave potential vorticity
2) Invert for wave, pw, and vortex stremfunctions, pv.
3) Calculate the geostrophic stremfunction, p = pv+pw.
"""
wv2 = k**2 + l**2
wv2i = 1./wv2
wv2i[0,0] = 0
phih = np.fft.fft2(phi)
phix, phiy = np.fft.ifft2(1j*k*phih), np.fft.ifft2(1j*l*phih)
jach = np.fft.fft2((1j*(np.conj(phix)*phiy - np.conj(phiy)*phix)).real)
jach[0,0] = 0
# the wavy PV
phi2 = np.abs(phi)**2
gphi2h = -wv2*np.fft.fft2(phi2)
qwh = 0.5*(0.5*gphi2h + jach)/f0
# invert for psi
pw = np.fft.ifft2((wv2i*qwh)).real
pv = np.fft.ifft2(-(wv2i*qh)).real
p = pv+pw
ph = np.fft.fft2(p)
return ph | b90c5f76c2be93d0a45b8c260e7a7228094ff4c0 | 8,621 |
def package_ref_key(package_name, ref):
"""Returns ndb.Key corresponding to particular PackageRef."""
assert is_valid_package_ref(ref), ref
return ndb.Key(PackageRef, ref, parent=package_key(package_name)) | 270b69baa1309bf29a054736ca6f898f23839ee3 | 8,622 |
def conv2d_backprop_input(dout, x_size, weight, stride=1, pad=0):
"""Backpropagation input for conv2d."""
filter_num, _, filter_h, filter_w = weight.shape
dout = dout.transpose(0, 2, 3, 1).reshape(-1, filter_num)
col_w = weight.reshape(filter_num, -1).T
dcol = np.dot(dout, col_w.T)
dx = col2im(dcol, x_size, filter_h, filter_w, stride, pad)
return dx | 3749398101de25ec4f7b83c8a52754d18d0e8872 | 8,623 |
def fine_license_ratio(license_data, fine_data, column_name1=None, column_name2=None,year=None):
"""Get ratio of fines to licenses issued in a given year
Parameters:
-----------
license_data: DataFrame
Any subset of the Professional and Occupational Licensing dataframe
fine_data: DataFrame
Any subset of the Disciplinary Actions dataframe
year: int
Year to use to subset your data
column_name1: Series
Column containing years in license_data dataset
column_name2: Series
Column containing years in fine_data dataset
Returns:
--------
tuple
A tuple with license percentage as the first entry and fine percentage as the second
(year, ratio)
"""
    year = int(year)
    column_name1 = str(column_name1)
    column_name2 = str(column_name2)
    if year not in license_data[column_name1].unique() or year not in fine_data[column_name2].unique():
        raise Exception(str(year) + " not a valid year for this dataset" + "\n----------------------------------------")
else:
license_data = license_data[license_data[column_name1]==year]
fine_data = fine_data[fine_data[column_name2]==year]
try:
license_count = len(license_data)
fine_count = len(fine_data)
fine_percentage = fine_count/license_count * 100
license_percentage = 100 - fine_percentage
return license_percentage, fine_percentage, license_count, fine_count
except ZeroDivisionError:
            print("Hmmm...It looks like there are no licenses yet for the year " + str(year)) | 97dad8c4f5c78b1291016e1cd9fa9c5f8ef20beb | 8,625
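A usage sketch with two tiny DataFrames, assuming fine_license_ratio is importable and pandas is installed; the column names and counts are illustrative:
import pandas as pd
licenses = pd.DataFrame({"license_year": [2020] * 10})
fines = pd.DataFrame({"fine_year": [2020] * 2})
print(fine_license_ratio(licenses, fines,
                         column_name1="license_year",
                         column_name2="fine_year",
                         year=2020))
# (80.0, 20.0, 10, 2)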
def import_obj(obj_path, hard=False):
"""
import_obj imports an object by uri, example::
>>> import_obj("module:main")
<function main at x>
:param obj_path: a string represents the object uri.
:param hard: a boolean value indicates whether to raise an exception on
import failures.
"""
try:
# ``__import__`` of Python 2.x could not resolve unicode, so we need
# to ensure the type of ``module`` and ``obj`` is native str.
module, obj = str(obj_path).rsplit(':', 1)
m = __import__(module, globals(), locals(), [obj], 0)
return getattr(m, obj)
except (ValueError, AttributeError, ImportError):
if hard:
raise | fe6bc0cd8fff5c0d5b1ba9fc0e153b2004b09755 | 8,626 |
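A minimal usage sketch resolving a callable from a "module:attribute" URI, assuming import_obj is importable:
join = import_obj("os.path:join")
print(join("tmp", "data.csv"))        # tmp/data.csv (separator depends on the OS)
print(import_obj("os.path:missing"))  # None, because hard defaults to False
# import_obj("os.path:missing", hard=True) would re-raise the AttributeError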