content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
def train_save_tfidf(filein, target):
    """Input is a bag-of-words corpus saved as a Matrix Market (.mm) file. The output is
    the corresponding tf-idf corpus; the fitted tf-idf model is also saved to disk."""
    try:
        corpus = corpora.MmCorpus(filein)
    except Exception:
        raise NameError('HRMMPH. The file does not seem to exist. Create a file '
                        'first by running the "train_save_dictionary_corpus" function.')
tfidf = models.TfidfModel(corpus)
tfidf.save(f'nlp_training_data/{target}_tfidf_model.tfidf')
tfidf_corpus = tfidf[corpus]
return tfidf_corpus | e4d41443d27f8b55f9fd6ba4b8c13a42d381a980 | 16,749 |
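A minimal usage sketch for the function above, assuming gensim is imported as in the snippet and that a bag-of-words corpus has already been serialized; the texts and paths here are hypothetical:

from gensim import corpora

texts = [["human", "machine", "interface"], ["survey", "user", "opinion"]]
dictionary = corpora.Dictionary(texts)
bow = [dictionary.doc2bow(text) for text in texts]
corpora.MmCorpus.serialize('nlp_training_data/example_corpus.mm', bow)  # hypothetical path

tfidf_corpus = train_save_tfidf('nlp_training_data/example_corpus.mm', 'example')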
def ScrewTrajectoryList(Xstart, Xend, Tf, N, method, gripper_state, traj_list):
""" Modified from the modern_robotics library ScrewTrajectory
Computes a trajectory as a list of SE(3) matrices with a gripper value and
converts into a list of lists
Args:
Xstart : The initial end-effector configuration
Xend : The final end-effector configuration
Tf : Total time of the motion in seconds from rest to rest
N : The number of points N > 1 in the discrete representation of the trajectory
method : The time-scaling method
      gripper_state : The gripper open (0) or closed (1) value
      traj_list : The list to which the flattened configurations are appended
    Returns:
      traj_list : list of rotations, positions, and gripper state
"""
N = int(N)
timegap = Tf / (N - 1.0)
traj = [[None]] * N
for i in range(N):
if method == 3:
s = CubicTimeScaling(Tf, timegap * i)
else:
s = QuinticTimeScaling(Tf, timegap * i)
traj[i] = np.dot(Xstart, MatrixExp6(MatrixLog6(np.dot(TransInv(Xstart), Xend)) * s))
traj = np.asarray(traj)
for i in range(N):
r11 = traj[i][0][0]
r12 = traj[i][0][1]
r13 = traj[i][0][2]
r21 = traj[i][1][0]
r22 = traj[i][1][1]
r23 = traj[i][1][2]
r31 = traj[i][2][0]
r32 = traj[i][2][1]
r33 = traj[i][2][2]
px = traj[i][0][3]
py = traj[i][1][3]
pz = traj[i][2][3]
traj_list.append([r11, r12, r13, r21, r22, r23, r31, r32, r33, px, py, pz, gripper_state])
return traj_list | 146f4f7b96207c74bbe0ed08e162c3ba656d7a43 | 16,750 |
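A minimal usage sketch, assuming numpy and the modern_robotics helpers used in the body are already imported; the start and end poses here are made up:

import numpy as np

Xstart = np.eye(4)
Xend = np.array([[1, 0, 0, 0.5],
                 [0, 1, 0, 0.0],
                 [0, 0, 1, 0.3],
                 [0, 0, 0, 1.0]])
# method=5 selects quintic time scaling; any value other than 3 falls through to quintic.
waypoints = ScrewTrajectoryList(Xstart, Xend, Tf=2.0, N=5, method=5,
                                gripper_state=0, traj_list=[])
# Each entry holds 13 values: the 9 rotation terms, px, py, pz, and the gripper state.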
def calculate_phase(time, period):
"""Calculates phase based on period.
Parameters
----------
time : type
Description of parameter `time`.
period : type
Description of parameter `period`.
Returns
-------
list
Orbital phase of the object orbiting the star.
"""
return (time % period) / period | a537810a7705b5d8b0144318469b249f64a01456 | 16,751 |
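A quick worked example: an observation 25.0 days into a 10.0-day period sits at phase 0.5.

calculate_phase(25.0, 10.0)   # -> 0.5
calculate_phase(3.0, 10.0)    # -> 0.3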
def perspective_transform(img):
"""
Do a perspective transform over an image.
    Points are hardcoded and depend on the camera and its positioning
    :param img: input image as a numpy array
    :return: the perspective-transformed image
"""
pts1 = np.float32([[250, 686], [1040, 680], [740, 490], [523, 492]])
pts2 = np.float32([[295, 724], [980, 724], [988, 164], [297, 150]])
M = cv2.getPerspectiveTransform(pts1, pts2)
transformed_image = cv2.warpPerspective(img, M, (img.shape[1], img.shape[0]))
return transformed_image | 51411c1fc73e897a657e2e89c44275796b16a1b6 | 16,753 |
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 400)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 400)
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
return apology("invalid username and/or password", 400)
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html") | ffa2152cffdbfd161f3e8aa23aefa3c49993e630 | 16,754 |
def value_iteration(P, nS, nA, gamma=0.9, tol=1e-3):
"""
Learn value function and policy by using value iteration method for a given
gamma and environment.
Parameters:
----------
P, nS, nA, gamma:
defined at beginning of file
tol: float
Terminate value iteration when
max |value(s) - prev_value(s)| < tol
Returns:
----------
value: np.ndarray[nS]
policy: np.ndarray[nS]
"""
value = np.zeros(nS) # value function initialized
policy = np.zeros(nS, dtype=int) # policy initialized
while True: # until convergence or finite horizon overflow
new_value = np.zeros(nS)
for state in range(nS): # for each state
best_Q_value = -float("inf") # we are looking for the best action in term of Q value
for action in range(nA): # for each action
p = P[state][action] # {(probability, nextstate, reward, terminal),...}[state,action]
reward = sum([i[0]*i[2] for i in p]) # expected reward for this state-action
Q_value = reward + gamma*(sum([i[0]*value[i[1]] for i in p])) # expected reward + gamma * expected value for this state-action
if Q_value > best_Q_value:
new_value[state] = Q_value # max_a Q for this state
policy[state] = action # argmax_a Q for this state
best_Q_value = Q_value
if np.max(np.abs(new_value - value)) < tol: # convergence
value = new_value
break
value = new_value
return value, policy | 7362b95cd453f0983e6b82acf73d0350eae2734c | 16,755 |
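A minimal usage sketch on a hypothetical two-state, two-action MDP, where P[s][a] is a list of (probability, next_state, reward, terminal) tuples:

P = {
    0: {0: [(1.0, 0, 0.0, False)], 1: [(1.0, 1, 1.0, False)]},
    1: {0: [(1.0, 0, 0.0, False)], 1: [(1.0, 1, 1.0, True)]},
}
value, policy = value_iteration(P, nS=2, nA=2, gamma=0.9, tol=1e-6)
# Action 1 always pays 1.0, so the returned policy picks it in both states.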
def get_clients():
"""
Determine if the current user has a connected client.
"""
return jsonify(g.user.user_id in clients) | 602d77b25b6608db24fca66d5bc55bc83a0530e8 | 16,756 |
def get_split_indices(word, curr_tokens, include_joiner_token, joiner):
"""Gets indices for valid substrings of word, for iterations > 0.
For iterations > 0, rather than considering every possible substring, we only
want to consider starting points corresponding to the start of wordpieces in
the current vocabulary.
Args:
word: string we want to split into substrings
curr_tokens: string to int dict of tokens in vocab (from previous iteration)
include_joiner_token: bool whether to include joiner token
joiner: string used to indicate suffixes
Returns:
list of ints containing valid starting indices for word
"""
indices = []
start = 0
while start < len(word):
end = len(word)
while end > start:
subtoken = word[start:end]
# Subtoken includes the joiner token.
if include_joiner_token and start > 0:
subtoken = joiner + subtoken
# If subtoken is part of vocab, 'end' is a valid start index.
if subtoken in curr_tokens:
indices.append(end)
break
end -= 1
if end == start:
return None
start = end
return indices | 495d924716cfd0e14430d225e50b313fea305dbb | 16,757 |
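A worked example: with the word "hello", a vocabulary containing "he" and "##llo", and joiner "##", the valid split points fall after characters 2 and 5.

get_split_indices("hello", {"he": 0, "##llo": 1}, include_joiner_token=True, joiner="##")
# -> [2, 5]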
def perspective(
        vlist: list,     # list of [x, y, z] vertices
        rotvec: list,    # [[sin, cos] pairs for roll, pitch and yaw]
        dispvec: list,   # [dx, dy, dz] translation vector
        d: float) -> tuple:
"""Projects 3D points to 2D and
apply rotation and translation
vectors
Args:
vlist : list of 3D vertices
rotvec : 3D rotation vector
dispvec: 3D translation vector
d : Distance of observer
from the screen
Returns:
tuple (list, list)
"""
projvlist = []
rotvlist = []
((sroll, croll),
(spitch, cpitch),
(syaw, cyaw)) = rotvec
for p in vlist:
(px, py, pz) = p
x1 = -cyaw * px - syaw * pz
y1 = croll * py - sroll * x1
z1 = -syaw * px + cyaw * pz
x = croll * x1 + sroll * py
y = spitch * z1 + cpitch * y1
z = cpitch * z1 - spitch * y1
x += dispvec[0]
y += dispvec[1]
z += dispvec[2]
rotvlist.append([x, y, z])
projvlist.append([-d * x / z,
-d * y / z])
return (rotvlist, projvlist) | daece49851ecca55ba30d4f6f82fe59d5deb5497 | 16,758 |
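A minimal usage sketch: rotvec stores (sin, cos) pairs for roll, pitch and yaw, so a 30-degree yaw with the observer displaced 5 units along z looks like this (the vertex is illustrative):

from math import sin, cos, radians

roll, pitch, yaw = 0.0, 0.0, radians(30)
rotvec = [[sin(roll), cos(roll)],
          [sin(pitch), cos(pitch)],
          [sin(yaw), cos(yaw)]]
rotated, projected = perspective([[1.0, 1.0, 1.0]], rotvec, [0.0, 0.0, 5.0], d=1.0)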
def actor_is_contact(api_user, nick, potential_contact):
"""Determine if one is a contact.
PARAMETERS:
potential_contact - stalkee.
RETURNS: boolean
"""
nick = clean.user(nick)
potential_contact = clean.user(potential_contact)
key_name = Relation.key_from(relation='contact',
owner=nick,
target=potential_contact)
rel_ref = Relation.get_by_key_name(key_name)
    return bool(rel_ref) | 93a3bfd0a52b2acb043c162428f0fa45754702bf | 16,760 |
def compute_mem(w, n_ring=1, spectrum='nonzero', tol=1e-10):
"""Compute Moran eigenvectors map.
Parameters
----------
w : BSPolyData, ndarray or sparse matrix, shape = (n_vertices, n_vertices)
Spatial weight matrix or surface. If surface, the weight matrix is
built based on the inverse geodesic distance between each vertex
and the vertices in its `n_ring`.
n_ring : int, optional
Neighborhood size to build the weight matrix. Only used if user
provides a surface mesh. Default is 1.
spectrum : {'all', 'nonzero'}, optional
        Eigenvalues/vectors to select. If 'all', recover all eigenvectors
        except the smallest one. Otherwise, keep only the eigenvectors with
        non-zero eigenvalues. Default is 'nonzero'.
tol : float, optional
Minimum value for an eigenvalue to be considered non-zero.
Default is 1e-10.
Returns
-------
w : 1D ndarray, shape (n_components,)
Eigenvalues in descending order. With ``n_components = n_vertices - 1``
if ``spectrum == 'all'`` and ``n_components = n_vertices - n_zero`` if
``spectrum == 'nonzero'``, and `n_zero` is number of zero eigenvalues.
mem : 2D ndarray, shape (n_vertices, n_components)
Eigenvectors of the weight matrix in same order.
See Also
--------
:func:`.moran_randomization`
:class:`.MoranRandomization`
References
----------
* Wagner H.H. and Dray S. (2015). Generating spatially constrained
null models for irregularly spaced data using Moran spectral
randomization methods. Methods in Ecology and Evolution, 6(10):1169-78.
"""
if spectrum not in ['all', 'nonzero']:
raise ValueError("Unknown autocor '{0}'.".format(spectrum))
# If surface is provided instead of affinity
if not (isinstance(w, np.ndarray) or ssp.issparse(w)):
w = me.get_ring_distance(w, n_ring=n_ring, metric='geodesic')
w.data **= -1 # inverse of distance
# w /= np.nansum(w, axis=1, keepdims=True) # normalize rows
if not is_symmetric(w):
w = make_symmetric(w, check=False, sparse_format='coo')
# Doubly centering weight matrix
if ssp.issparse(w):
m = w.mean(axis=0).A
wc = w.mean() - m - m.T
if not ssp.isspmatrix_coo(w):
w_format = w.format
w = w.tocoo(copy=False)
row, col = w.row, w.col
w = getattr(w, 'to' + w_format)(copy=False)
else:
row, col = w.row, w.col
wc[row, col] += w.data
else:
m = w.mean(axis=0, keepdims=True)
wc = w.mean() - m - m.T
wc += w
# when using float64, eigh is unstable for sparse matrices
ev, mem = np.linalg.eigh(wc.astype(np.float32))
ev, mem = ev[::-1], mem[:, ::-1]
# Remove zero eigen-value/vector
ev_abs = np.abs(ev)
mask_zero = ev_abs < tol
n_zero = np.count_nonzero(mask_zero)
if n_zero == 0:
raise ValueError('Weight matrix has no zero eigenvalue.')
# Multiple zero eigenvalues
if spectrum == 'all':
if n_zero > 1:
n = w.shape[0]
memz = np.hstack([mem[:, mask_zero], np.ones((n, 1))])
q, _ = np.linalg.qr(memz)
mem[:, mask_zero] = q[:, :-1]
idx_zero = mask_zero.argmax()
else:
idx_zero = ev_abs.argmin()
ev[idx_zero:-1] = ev[idx_zero+1:]
mem[:, idx_zero:-1] = mem[:, idx_zero + 1:]
ev = ev[:-1]
mem = mem[:, :-1]
else: # only nonzero
mask_nonzero = ~mask_zero
ev = ev[mask_nonzero]
mem = mem[:, mask_nonzero]
return mem, ev | fe622d75816629aaf5fce34405eb7a3021393d7d | 16,761 |
def eval_BenchmarkModel(x, a, y, model, loss):
"""
Given a dataset (x, a, y) along with predictions,
loss function name
evaluate the following:
- average loss on the dataset
- DP disp
"""
pred = model(x) # apply model to get predictions
n = len(y)
if loss == "square":
err = mean_squared_error(y, pred) # mean square loss
elif loss == "absolute":
err = mean_absolute_error(y, pred) # mean absolute loss
    ## functions from the sklearn.metrics library.
    ## Oddly, the evaluate_FairModel function uses its own loss implementation instead.
elif loss == "logistic": # assuming probabilistic predictions
# take the probability of the positive class
pred = pd.DataFrame(pred).iloc[:, 1]
err = log_loss(y, pred, eps=1e-15, normalize=True)
else:
raise Exception('Loss not supported: ', str(loss))
disp = pred2_disp(pred, a, y, loss)
    ## this function seems incomplete,
    ## because the definition of the function argument `quantization` cannot be found.
    loss_vec = loss_vec2(pred, y, loss)
    ## Isn't this equal to the error computed above?
loss_mean, loss_std = norm.fit(loss_vec)
evaluation = {}
evaluation['pred'] = pred
evaluation['average_loss'] = err
evaluation['DP_disp'] = disp['DP']
evaluation['disp_std'] = KS_confbdd(n, alpha=0.05)
evaluation['loss_std'] = loss_std / np.sqrt(n)
return evaluation | cdb4e82004d94c7b25a705d33f716ac3d81e38de | 16,762 |
def parse_sgf_game(s):
"""Read a single SGF game from a string, returning the parse tree.
s -- 8-bit string
Returns a Coarse_game_tree.
Applies the rules for FF[4].
Raises ValueError if can't parse the string.
If a property appears more than once in a node (which is not permitted by
the spec), treats it the same as a single property with multiple values.
Identifies the start of the SGF content by looking for '(;' (with possible
whitespace between); ignores everything preceding that. Ignores everything
following the first game.
"""
game_tree, _ = _parse_sgf_game(s, 0)
if game_tree is None:
raise ValueError("no SGF data found")
return game_tree | 4315277a91f732f92c3001cf570221ab6aa657a7 | 16,763 |
import re
def retrieve(
framework,
region,
version=None,
py_version=None,
instance_type=None,
accelerator_type=None,
image_scope=None,
container_version=None,
distribution=None,
base_framework_version=None,
):
"""Retrieves the ECR URI for the Docker image matching the given arguments.
Args:
framework (str): The name of the framework or algorithm.
region (str): The AWS region.
version (str): The framework or algorithm version. This is required if there is
more than one supported version for the given framework or algorithm.
py_version (str): The Python version. This is required if there is
more than one supported Python version for the given framework version.
instance_type (str): The SageMaker instance type. For supported types, see
https://aws.amazon.com/sagemaker/pricing/instance-types. This is required if
there are different images for different processor types.
accelerator_type (str): Elastic Inference accelerator type. For more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html.
image_scope (str): The image type, i.e. what it is used for.
Valid values: "training", "inference", "eia". If ``accelerator_type`` is set,
``image_scope`` is ignored.
container_version (str): the version of docker image
distribution (dict): A dictionary with information on how to run distributed training
(default: None).
Returns:
str: the ECR URI for the corresponding SageMaker Docker image.
Raises:
ValueError: If the combination of arguments specified is not supported.
"""
config = _config_for_framework_and_scope(framework, image_scope, accelerator_type)
original_version = version
version = _validate_version_and_set_if_needed(version, config, framework)
version_config = config["versions"][_version_for_config(version, config)]
if framework == HUGGING_FACE_FRAMEWORK:
if version_config.get("version_aliases"):
full_base_framework_version = version_config["version_aliases"].get(
base_framework_version, base_framework_version
)
_validate_arg(full_base_framework_version, list(version_config.keys()), "base framework")
version_config = version_config.get(full_base_framework_version)
py_version = _validate_py_version_and_set_if_needed(py_version, version_config, framework)
version_config = version_config.get(py_version) or version_config
registry = _registry_from_region(region, version_config["registries"])
hostname = utils._botocore_resolver().construct_endpoint("ecr", region)["hostname"]
repo = version_config["repository"]
processor = _processor(
instance_type, config.get("processors") or version_config.get("processors")
)
if framework == HUGGING_FACE_FRAMEWORK:
pt_or_tf_version = (
re.compile("^(pytorch|tensorflow)(.*)$").match(base_framework_version).group(2)
)
tag_prefix = f"{pt_or_tf_version}-transformers{original_version}"
else:
tag_prefix = version_config.get("tag_prefix", version)
tag = _format_tag(
tag_prefix,
processor,
py_version,
container_version,
)
if _should_auto_select_container_version(instance_type, distribution):
container_versions = {
"tensorflow-2.3-gpu-py37": "cu110-ubuntu18.04-v3",
"tensorflow-2.3.1-gpu-py37": "cu110-ubuntu18.04",
"tensorflow-2.3.2-gpu-py37": "cu110-ubuntu18.04",
"tensorflow-1.15-gpu-py37": "cu110-ubuntu18.04-v8",
"tensorflow-1.15.4-gpu-py37": "cu110-ubuntu18.04",
"tensorflow-1.15.5-gpu-py37": "cu110-ubuntu18.04",
"mxnet-1.8-gpu-py37": "cu110-ubuntu16.04-v1",
"mxnet-1.8.0-gpu-py37": "cu110-ubuntu16.04",
"pytorch-1.6-gpu-py36": "cu110-ubuntu18.04-v3",
"pytorch-1.6.0-gpu-py36": "cu110-ubuntu18.04",
"pytorch-1.6-gpu-py3": "cu110-ubuntu18.04-v3",
"pytorch-1.6.0-gpu-py3": "cu110-ubuntu18.04",
}
key = "-".join([framework, tag])
if key in container_versions:
tag = "-".join([tag, container_versions[key]])
if tag:
repo += ":{}".format(tag)
return ECR_URI_TEMPLATE.format(registry=registry, hostname=hostname, repository=repo) | eeee1aec620de5b29650b9605c7fb2b13aed76e5 | 16,764 |
def deconstruct_DMC(G, alpha, beta):
"""Deconstruct a DMC graph over a single step."""
# reverse complementation
if G.has_edge(alpha, beta):
G.remove_edge(alpha, beta)
w = 1
else:
w = 0
# reverse mutation
alpha_neighbors = set(G.neighbors(alpha))
beta_neighbors = set(G.neighbors(beta))
x = len(alpha_neighbors & beta_neighbors)
y = len(alpha_neighbors | beta_neighbors)
for neighbor in alpha_neighbors:
G.add_edge(beta, neighbor)
# reverse duplication
G.remove_node(alpha)
return (w, x, y) | fa32a325fd49435e3191a20b908ac0e9c3b992f8 | 16,765 |
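A minimal usage sketch with networkx, undoing one duplication step in which node 3 (alpha) was duplicated from node 0 (beta); the graph is made up:

import networkx as nx

G = nx.Graph([(0, 1), (0, 2), (3, 1), (3, 2), (0, 3)])
w, x, y = deconstruct_DMC(G, alpha=3, beta=0)
# w=1 (the alpha-beta edge existed), x=2 shared neighbors, y=2 total neighbors;
# node 3 is removed and its edges are folded back onto node 0.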
def new_followers_view(request):
"""
View to show new followers.
:param request:
:return:
"""
current_author = request.user.user
followers_new = FollowRequest.objects.all().filter(friend=current_author).filter(acknowledged=False)
for follow in followers_new:
follow.acknowledged = True
follow.save()
request.context['followers_new'] = followers_new
return render(request, 'authors/follower_request.html', request.context) | 88277967b8185c47b9bb955dabf6fcd79ea3a530 | 16,766 |
def inv(a, p):
"""Inverse of a in :math:`{mathbb Z}_p`
:param a,p: non-negative integers
:complexity: O(log a + log p)
"""
return bezout(a, p)[0] % p | d2caab3a564d5f58d1be345900382e762350a2ea | 16,767 |
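`bezout` is not defined in this snippet; a standard extended-Euclid sketch returning (u, v) with u*a + v*b == gcd(a, b) is assumed, for example:

def bezout(a, b):
    # extended Euclid: returns (u, v) with u*a + v*b == gcd(a, b)
    if b == 0:
        return (1, 0)
    u, v = bezout(b, a % b)
    return (v, u - (a // b) * v)

inv(3, 7)   # -> 5, since (3 * 5) % 7 == 1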
def metadata_columns(request, metadata_column_headers):
"""Make a metadata column header and column value dictionary."""
template = 'val{}'
columns = {}
for header in metadata_column_headers:
columns[header] = []
for i in range(0, request.param):
columns[header].append(template.format(i))
return columns | ca1f89935260e9d55d57df5fe5fbb0946b5948ac | 16,769 |
def all_done_tasks_for_person(person, client=default):
"""
Returns:
list: Tasks that are done for given person (only for open projects).
"""
person = normalize_model_parameter(person)
return raw.fetch_all("persons/%s/done-tasks" % person["id"], client=client) | 68883d7ac9c1e0cd009ff02ae4944782ae6fc637 | 16,770 |
def transform_cfg_to_wcnf(cfg: CFG) -> CFG:
"""
Transform given cfg into Weakened Normal Chomsky Form (WNCF)
Parameters
----------
cfg: CFG
CFG object to transform to WNCF
Returns
-------
wncf: CFG
CFG in Weakened Normal Chomsky Form (WNCF)
"""
wncf = (
cfg.remove_useless_symbols()
.eliminate_unit_productions()
.remove_useless_symbols()
)
new_productions = wncf._get_productions_with_only_single_terminals()
new_productions = wncf._decompose_productions(new_productions)
return CFG(start_symbol=wncf.start_symbol, productions=new_productions) | 55d72634b02feab7150d290619b40fc2976ffae3 | 16,771 |
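A minimal usage sketch, assuming the CFG class is pyformlang's (whose private helpers the function relies on):

from pyformlang.cfg import CFG

cfg = CFG.from_text("S -> a S b | a b")
wcnf = transform_cfg_to_wcnf(cfg)
# Every production is now of the form A -> B C, A -> a, or A -> epsilon.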
def insert_scope_name(urls):
"""
given a tuple of URLs for webpy with '%s' as a placeholder for
SCOPE_NAME_REGEXP, return a finalised tuple of URLs that will work for all
SCOPE_NAME_REGEXPs in all schemas
"""
regexps = get_scope_name_regexps()
result = []
for i in range(0, len(urls), 2):
if "%s" in urls[i]:
# add a copy for each unique SCOPE_NAME_REGEXP
for scope_name_regexp in regexps:
result.append(urls[i] % scope_name_regexp)
result.append(urls[i + 1])
else:
# pass through unmodified
result.append(urls[i])
result.append(urls[i + 1])
return tuple(result) | 28cda0956f232adf176666c776b39463caca9847 | 16,773 |
def fit_cochrane_orcutt(ts, regressors, maxIter=10, sc=None):
"""
Fit linear regression model with AR(1) errors , for references on Cochrane Orcutt model:
See [[https://onlinecourses.science.psu.edu/stat501/node/357]]
See : Applied Linear Statistical Models - Fifth Edition - Michael H. Kutner , page 492
    The method assumes the time series to have the following model
    Y_t = B.X_t + e_t
    e_t = rho * e_{t-1} + w_t
    e_t has an autoregressive structure, where w_t is iid ~ N(0, sigma^2)
    Outline of the method :
    1) OLS Regression for Y (timeseries) over regressors (X)
    2) Apply an autocorrelation test (Durbin-Watson test) over the residuals, to test whether e_t still
    has an autoregressive structure
    3) if the test fails stop, else update the coefficients (B's) accordingly and go back to step 1)
Parameters
----------
ts:
Vector of size N for time series data to create the model for as a Numpy array
regressors:
Matrix N X K for the timed values for K regressors over N time points as a Numpy array
maxIter:
        maximum number of iterations in the iterative Cochrane-Orcutt estimation
    Returns an instance of class [[RegressionARIMAModel]]
"""
    assert sc is not None, "Missing SparkContext"
    jvm = sc._jvm
    jmodel = jvm.com.cloudera.sparkts.models.RegressionARIMA.fitCochraneOrcutt(
        _nparray2breezevector(sc, ts), _nparray2breezematrix(sc, regressors), maxIter
    )
return RegressionARIMAModel(jmodel=jmodel, sc=sc) | 958ca88e6ac37ebd58c7f1ff88c191d801e4cb87 | 16,774 |
def get_nodeweight(obj):
"""
utility function that returns a
node class and it's weight
can be used for statistics
to get some stats when NO Advanced Nodes are available
"""
k = obj.__class__.__name__
if k in ('Text',):
return k, len(obj.caption)
elif k == 'ImageLink' and obj.isInline():
return 'InlineImageLink', 1
return k, 1 | 1ab88f73621c8396fca08551dd14c9a757d019ad | 16,775 |
def CMYtoRGB(C, M, Y):
""" convert CMY to RGB color
:param C: C value (0;1)
:param M: M value (0;1)
:param Y: Y value (0;1)
:return: RGB tuple (0;255) """
RGB = [(1.0 - i) * 255.0 for i in (C, M, Y)]
return tuple(RGB) | cfc2c7b91dd7f1faf93351e28ffdd9906613471a | 16,776 |
def update_local_artella_root():
"""
Updates the environment variable that stores the Artella Local Path
NOTE: This is done by Artella plugin when is loaded, so we should not do it manually again
"""
metadata = get_metadata()
if metadata:
metadata.update_local_root()
return True
return False | 23fb9f0eb47aec566dc6b9862474535545b963dc | 16,777 |
def app_tests(enable_migrations, tags, verbosity):
"""Gets the TestRunner and runs the tests"""
# prepare the actual test environment
setup(enable_migrations, verbosity)
# reuse Django's DiscoverRunner
if not hasattr(settings, 'TEST_RUNNER'):
settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner'
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=verbosity,
tags=tags,
)
failures = test_runner.run_tests(['.'])
return failures | c56ca20ea98dadf97f39a30e2f07c0eb3952b418 | 16,778 |
def quicksort(numbers, low, high):
"""Python implementation of quicksort."""
if low < high:
pivot = _partition(numbers, low, high)
quicksort(numbers, low, pivot)
quicksort(numbers, pivot + 1, high)
return numbers | 064aa30f032036aa73f08b2b94ce4556ffc565fd | 16,779 |
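`_partition` is not shown; the recursion pattern quicksort(numbers, low, pivot) / quicksort(numbers, pivot + 1, high) matches a Hoare-style partition, so a compatible sketch would be:

def _partition(numbers, low, high):
    # Hoare partition around numbers[low]; returns the split index.
    pivot = numbers[low]
    i, j = low - 1, high + 1
    while True:
        i += 1
        while numbers[i] < pivot:
            i += 1
        j -= 1
        while numbers[j] > pivot:
            j -= 1
        if i >= j:
            return j
        numbers[i], numbers[j] = numbers[j], numbers[i]

quicksort([5, 2, 9, 1], 0, 3)   # -> [1, 2, 5, 9]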
import numpy as np
from numpy import exp
import scipy.stats
def bsput_delta(k, t, *, x0=1., r=0., q=0., sigma=1.):
"""
bsput_delta(k, t, *, x0=1., r=0., q=0., sigma=1.)
Black-Scholes put option delta.
See Also
--------
bscall
"""
r, q = np.asarray(r), np.asarray(q)
d1, d2 = bsd1d2(k, t, x0=x0, r=r, q=q, sigma=sigma)
return -exp(-q*t) * scipy.stats.norm.cdf(-d1) | d6e1e3e6c2f97fa856b156170ac49ea3d5530423 | 16,780 |
from typing import Sequence
import re
def regex_filter(patterns: Sequence[Regex], negate: bool = False, **kwargs) -> SigMapper:
"""Filter out the signals that do not match regex patterns (or do match if negate=True)."""
patterns = list(map(re.compile, patterns))
def filt(sigs):
def map_sig(sig):
return _regex_map(sig, patterns,
on_match = lambda s, p: (s if not negate else None),
on_no_match = lambda s: (None if not negate else s),
**kwargs)
return list(filter(None, map(map_sig, sigs)))
return filt | 4c76d4bd5f76d5d35373ec14c910291b155cd4db | 16,781 |
def cpncc(img, vertices_lst, tri):
"""cython version for PNCC render: original paper"""
h, w = img.shape[:2]
c = 3
pnccs_img = np.zeros((h, w, c))
for i in range(len(vertices_lst)):
vertices = vertices_lst[i]
pncc_img = crender_colors(vertices, tri, pncc_code, h, w, c)
pnccs_img[pncc_img > 0] = pncc_img[pncc_img > 0]
pnccs_img = pnccs_img.squeeze() * 255
return pnccs_img | 8c7e380b56e26197cfb6b9b65c8d373ada0be4b1 | 16,782 |
import time
def run_net(X, y, batch_size, dnn, data_layer_name, label_layer_name,
loss_layer, accuracy_layer, accuracy_sink, is_train):
"""Runs dnn on given data"""
start = time.time()
total_loss = 0.
run_iter = dnn.learn if is_train else dnn.run
math_engine = dnn.math_engine
accuracy_layer.reset = True # Reset previous statistics
for X_batch, y_batch in irnn_data_iterator(X, y, batch_size, math_engine):
run_iter({data_layer_name: X_batch, label_layer_name: y_batch})
        total_loss += loss_layer.last_loss * y_batch.batch_width
accuracy_layer.reset = False # Don't reset statistics within one epoch
avg_loss = total_loss / y.shape[0]
acc = accuracy_sink.get_blob().asarray()[0]
run_time = time.time() - start
return avg_loss, acc, run_time | 43121dff269df6a03763f130e8f75f0ce6984a57 | 16,783 |
from typing import Any
import json
def json_safe(arg: Any):
"""
Checks whether arg can be json serialized and if so just returns arg as is
otherwise returns none
"""
try:
json.dumps(arg)
return arg
    except (TypeError, ValueError):
return None | 97ac87464fb4b31b4fcfc7896252d23a10e57b72 | 16,784 |
def _key_iv_check(key_iv):
"""
    Validate the key or the initialization vector
    """
    # key
if key_iv is None or not isinstance(key_iv, string_types):
raise TypeError('Parameter key or iv:{} not a basestring'.format(key_iv))
if isinstance(key_iv, text_type):
key_iv = key_iv.encode(encoding=E_FMT)
if len(key_iv) > BLOCK_BYTE:
raise ValueError('Parameter key or iv:{} byte greater than {}'.format(key_iv.decode(E_FMT),
BLOCK_BYTE))
return key_iv | 809ff811a433f9843b330a56be926411871d8b7a | 16,785 |
def decomposeArbitraryLength(number):
"""
    Returns the powers of two that sum to the given number
Examples
--------
number 42 : 32 + 8 + 2
powers : 5, 3, 1
"""
if number < 1:
raise WaveletException("Number should be greater than 1")
tempArray = list()
current = number
position = 0
while current >= 1.:
power = getExponent(current)
tempArray.append(power)
current = current - scalb(1., power)
position += 1
return tempArray[:position] | 5645c9024dd93aa3bfaf904d7a69f4d46977fb5a | 16,786 |
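`getExponent`, `scalb` and `WaveletException` are defined elsewhere in the original module; math.frexp/math.ldexp-based stand-ins reproduce the documented behaviour (42 -> powers [5, 3, 1]):

import math

def getExponent(x):
    return math.frexp(x)[1] - 1    # largest p with 2**p <= x

def scalb(x, power):
    return math.ldexp(x, power)    # x * 2**power

decomposeArbitraryLength(42)       # -> [5, 3, 1]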
def ax_draw_macd2(axes, ref, kdata, n1=12, n2=26, n3=9):
"""绘制MACD
:param axes: 指定的坐标轴
:param KData kdata: KData
:param int n1: 指标 MACD 的参数1
:param int n2: 指标 MACD 的参数2
:param int n3: 指标 MACD 的参数3
"""
macd = MACD(CLOSE(kdata), n1, n2, n3)
bmacd, fmacd, smacd = macd.getResult(0), macd.getResult(1), macd.getResult(2)
text = 'MACD(%s,%s,%s) DIF:%.2f, DEA:%.2f, BAR:%.2f'%(n1,n2,n3,fmacd[-1],smacd[-1],bmacd[-1])
#axes.text(0.01,0.97, text, horizontalalignment='left', verticalalignment='top', transform=axes.transAxes)
total = len(kdata)
x = [i for i in range(total)]
y = bmacd
y1,y2,y3 = [y[0]],[y[0]],[y[0]]
for i in range(1, total):
if ref[i]-ref[i-1]>0 and y[i]-y[i-1]>0:
y2.append(y[i])
y1.append('-')
y3.append('-')
elif ref[i]-ref[i-1]<0 and y[i]-y[i-1]<0:
y3.append(y[i])
y1.append('-')
y2.append('-')
else:
y1.append(y[i])
y2.append('-')
y3.append('-')
style = gcf().get_style(axes)
bar = Bar(subtitle=text, title_pos='10%', title_top='8%')
bar.add('1', x, y1, is_stack=True, is_legend_show=False, **style)
bar.add('2', x, y2, is_stack=True, is_legend_show=False, **style)
bar.add('3', x, y3, is_stack=True, is_legend_show=False, **style)
axes.add(bar)
fmacd.plot(axes=axes, line_type='dotted')
smacd.plot(axes=axes)
gcf().add_axis(axes)
return gcf() | 3bcb73756211a8906f3bf601207092177aa45ade | 16,787 |
import scipy
def scipy_bfgs(
criterion_and_derivative,
x,
*,
convergence_absolute_gradient_tolerance=CONVERGENCE_ABSOLUTE_GRADIENT_TOLERANCE,
stopping_max_iterations=STOPPING_MAX_ITERATIONS,
norm=np.inf,
):
"""Minimize a scalar function of one or more variables using the BFGS algorithm.
For details see :ref:`list_of_scipy_algorithms`.
"""
options = {
"gtol": convergence_absolute_gradient_tolerance,
"maxiter": stopping_max_iterations,
"norm": norm,
}
res = scipy.optimize.minimize(
fun=criterion_and_derivative,
x0=x,
method="BFGS",
jac=True,
options=options,
)
return process_scipy_result(res) | e1d61454e7ea782d37b4ab222599c69b2c89df1b | 16,788 |
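A minimal usage sketch: because jac=True is passed to scipy, the criterion callable must return the function value and its gradient as a tuple. The quadratic below is just an illustration, and the module-level tolerance constants are assumed to exist:

import numpy as np

def sphere_and_gradient(x):
    return x @ x, 2 * x            # value and gradient of f(x) = ||x||^2

result = scipy_bfgs(sphere_and_gradient, np.array([3.0, -2.0]))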
from random import shuffle
import six
def assign_to_coders_backend(sample,
limit_to_unassigned,
shuffle_pieces_before_assigning,
assign_each_piece_n_times,
max_assignments_per_piece,
coders, max_pieces_per_coder,
creation_time, creator):
"""Assignment to coders currently uses the following algorithm:
#. Get a list of all pieces in the sample.
#. If "shuffle pieces before assigning" is checked, shuffle the list of pieces
#. Make a numbering of "target coders" for this assignment, determine a
coder whose "turn" it is.
#. For each piece in the list of pieces, do the following:
#. If "limit to unassigned" is checked, and the piece is assigned to
someone, continue to the next piece.
#. Find how often this piece has already been assigned as
``n_piece_assignments``.
#. Determine number of new assignments *n* for this piece as::
n = min(
max_assignments_per_piece-n_piece_assignments,
assign_each_piece_n_times))
#. Do the following *n* times:
#. Try to assign the piece to the coder whose 'turn' it is.
#. If that coder already has this article assigned, go
round-robin among coders until someone does not have the article
assigned to them.
#. If no-one is found, skip this piece.
#. Advance the "turn", taking into account ``pieces_per_coder``.
If all coders have reached their ``pieces_per_coder`` (in this
assignment round), stop.
"""
log_lines = []
coder_idx_to_count = {}
num_coders = len(coders)
pieces = sample.pieces.all()
if shuffle_pieces_before_assigning:
pieces = list(pieces)
shuffle(pieces)
quit_flag = False
coder_idx = 0
for piece in pieces:
n_piece_assignments = CodingAssignment.objects.filter(
sample=sample, piece=piece).count()
if (limit_to_unassigned and n_piece_assignments):
log_lines.append("%s already assigned to someone, skipping."
% six.text_type(piece))
continue
assign_times = assign_each_piece_n_times
if max_assignments_per_piece is not None:
            max_assign_times = max(
0,
max_assignments_per_piece
- n_piece_assignments)
assign_times = min(
max_assign_times,
assign_times)
if assign_times == 0:
log_lines.append("Piece '%s' has reached max assignment count, skipping."
% six.text_type(piece))
continue
for i_assignment in range(assign_times):
local_coder_idx = coder_idx
assignment_tries = 0
# was this piece already assigned to this coder? (if so, try next)
# Note that, in its desperation, this may assign a few more items
# to a coder than are technically allowed by their limit.
while (
CodingAssignment.objects.filter(
sample=sample, piece=piece,
coder=coders[local_coder_idx]).count()
and assignment_tries < num_coders):
local_coder_idx = (local_coder_idx + 1) % num_coders
assignment_tries += 1
if assignment_tries >= num_coders:
log_lines.append("Piece '%s' already assigned "
"to all coders, skipping." % six.text_type(piece))
break
assmt = CodingAssignment()
assmt.coder = coders[local_coder_idx]
assmt.piece = piece
assmt.sample = sample
assmt.state = assignment_states.not_started
assmt.latest_state_time = creation_time
assmt.creation_time = creation_time
assmt.creator = creator
assmt.save()
coder_idx_to_count[local_coder_idx] = \
coder_idx_to_count.get(local_coder_idx, 0) + 1
# {{{ advance coder turn
find_coder_tries = 0
while find_coder_tries < num_coders:
coder_idx = (coder_idx + 1) % num_coders
if (
max_pieces_per_coder is None
or coder_idx_to_count.get(coder_idx, 0)
< max_pieces_per_coder):
break
find_coder_tries += 1
if find_coder_tries >= num_coders:
log_lines.append("All coders have reached their item limit, "
"stopping.")
quit_flag = True
break
# }}}
if quit_flag:
break
for coder_idx, coder in enumerate(coders):
log_lines.append("%s: %d new items assigned"
% (coder, coder_idx_to_count.get(coder_idx, 0)))
return log_lines | ffe59dce1b85f7b77e652a1823f298b643d104c7 | 16,789 |
def mask_rcnn_heads_add_mask_rcnn_losses(model, blob_mask):
"""Add Mask R-CNN specific losses."""
loss_mask = model.net.SigmoidCrossEntropyLoss(
[blob_mask, 'masks_int32'],
'loss_mask',
scale=model.GetLossScale() * cfg.MRCNN.WEIGHT_LOSS_MASK
)
loss_gradients = blob_utils_get_loss_gradients(model, [loss_mask])
model.AddLosses('loss_mask')
return loss_gradients | 1f94662948d2576874ca4bb13a602e0a0482d787 | 16,790 |
def parse_ns_headers(ns_headers):
"""Ad-hoc parser for Netscape protocol cookie-attributes.
The old Netscape cookie format for Set-Cookie can for instance contain
an unquoted "," in the expires field, so we have to use this ad-hoc
parser instead of split_header_words.
XXX This may not make the best possible effort to parse all the crap
that Netscape Cookie headers contain. Ronald Tschalar's HTTPClient
parser is probably better, so could do worse than following that if
this ever gives any trouble.
Currently, this is also used for parsing RFC 2109 cookies.
"""
known_attrs = ("expires", "domain", "path", "secure",
# RFC 2109 attrs (may turn up in Netscape cookies, too)
"version", "port", "max-age")
result = []
for ns_header in ns_headers:
pairs = []
version_set = False
# XXX: The following does not strictly adhere to RFCs in that empty
# names and values are legal (the former will only appear once and will
# be overwritten if multiple occurrences are present). This is
# mostly to deal with backwards compatibility.
for ii, param in enumerate(ns_header.split(';')):
param = param.strip()
key, sep, val = param.partition('=')
key = key.strip()
if not key:
if ii == 0:
break
else:
continue
# allow for a distinction between present and empty and missing
# altogether
val = val.strip() if sep else None
if ii != 0:
lc = key.lower()
if lc in known_attrs:
key = lc
if key == "version":
# This is an RFC 2109 cookie.
if val is not None:
val = strip_quotes(val)
version_set = True
elif key == "expires":
# convert expires date to seconds since epoch
if val is not None:
val = http2time(strip_quotes(val)) # None if invalid
pairs.append((key, val))
if pairs:
if not version_set:
pairs.append(("version", "0"))
result.append(pairs)
return result | 91d1006d6495b1ad86ff65abbc1575d9c759f183 | 16,791 |
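For example, a simple Netscape-style header parses as follows (a missing value is kept as None, and a version attribute is appended when none was set):

parse_ns_headers(['foo=bar; path=/; secure'])
# -> [[('foo', 'bar'), ('path', '/'), ('secure', None), ('version', '0')]]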
def same_kind_right_null(a: DataType, _: Null) -> bool:
"""Return whether `a` is nullable."""
return a.nullable | 005e9d62702d8f9c6d1e1a4911c7dedf7d81bb73 | 16,792 |
def unary_col(op, v):
"""
interpretor for executing unary operator expressions on columnars
"""
if op == "+":
return v
if op == "-":
return compute.subtract(0.0, v)
if op.lower() == "not":
return compute.invert(v)
raise Exception("unary op not implemented") | ff4eec1f333cd0425cb1b7c533ec4dc94179512e | 16,793 |
def test_start_sep_graph() -> nx.Graph:
"""test graph with known clique partition that needs start_separate"""
G = nx.Graph()
G.add_nodes_from(range(6))
G.add_edges_from([(0, 1, {'weight': 1.0}), (0, 2, {'weight': -10}), (0, 3, {'weight': 1}), (0, 4, {'weight': -10}), (0, 5, {'weight': -10}),
(1, 2, {'weight': 1.2}), (1, 3, {'weight': -10}), (1, 4, {'weight': -10}), (1, 5, {'weight': -10}),
(2, 3, {'weight': 1}), (2, 4, {'weight': -1}), (2, 5, {'weight': 0.5}),
(3, 4, {'weight': 0.5}), (3, 5, {'weight': -1})])
return G | 84bd5a140ff7c8882513395a305f69d64d1830a7 | 16,794 |
def structure(table_toplevels):
"""
Accepts an ordered sequence of TopLevel instances and returns a navigable object structure representation of the
TOML file.
"""
table_toplevels = tuple(table_toplevels)
obj = NamedDict()
last_array_of_tables = None # The Name of the last array-of-tables header
for toplevel in table_toplevels:
if isinstance(toplevel, toplevels.AnonymousTable):
obj[''] = toplevel.table_element
elif isinstance(toplevel, toplevels.Table):
if last_array_of_tables and toplevel.name.is_prefixed_with(last_array_of_tables):
seq = obj[last_array_of_tables]
unprefixed_name = toplevel.name.without_prefix(last_array_of_tables)
seq[-1] = CascadeDict(seq[-1], NamedDict({unprefixed_name: toplevel.table_element}))
else:
obj[toplevel.name] = toplevel.table_element
else: # It's an ArrayOfTables
if last_array_of_tables and toplevel.name != last_array_of_tables and \
toplevel.name.is_prefixed_with(last_array_of_tables):
seq = obj[last_array_of_tables]
unprefixed_name = toplevel.name.without_prefix(last_array_of_tables)
if unprefixed_name in seq[-1]:
seq[-1][unprefixed_name].append(toplevel.table_element)
else:
cascaded_with = NamedDict({unprefixed_name: [toplevel.table_element]})
seq[-1] = CascadeDict(seq[-1], cascaded_with)
else:
obj.append(toplevel.name, toplevel.table_element)
last_array_of_tables = toplevel.name
return obj | c34590f604d52ff4bfcf3cf1bae1fc41a7a1f3ec | 16,795 |
import math
def h(q):
"""Binary entropy func"""
if q in {0, 1}:
return 0
return (q * math.log(1 / q, 2)) + ((1 - q) * math.log(1 / (1 - q), 2)) | ad3d02d6e7ddf622c16ec8df54752ac5c77f8972 | 16,796 |
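For example, a fair coin carries one full bit of entropy, while a 90/10 split carries roughly 0.47 bits:

h(0.5)   # -> 1.0
h(0.9)   # -> ~0.469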
def has_next_page(page_info: dict) -> bool:
"""
Extracts value from a dict with hasNextPage key, raises an error if the key is not available
:param page_info: pagination info
    :return: a bool indicating if the response has a next page
"""
has_next_page = page_info.get('hasNextPage')
if has_next_page is None:
raise KeyNotFoundException('hasNextPage key not available')
else:
return has_next_page | 13c7bf0096127e054adaa8a331d2168bfb76c1d3 | 16,797 |
def _stuw_code(current_name=None):
""""
Zoekt door TYPESTUW naar de naam van het stuwtype, geeft attribuut waarde uit DAMO
"""
if current_name not in TYPESTUW.values():
return 99
for i, name in TYPESTUW.items():
if name == current_name:
return i | f0444885fd9956bdb150442dc1de7de09a0ac693 | 16,798 |
def _build_init_nodes(context, device):
"""
Build initial inputs for beam search algo
"""
decoder_input = _prepare_init_inputs(context, device)
root_node = BeamSearchNode(None, None, decoder_input, 0, len(context))
return [root_node] | 009cf7b09f39eb5c9722015d310ecab0b32f7c59 | 16,799 |
import typing
def compute_accuracy(data):
"""Return [wpm, accuracy]."""
prompted_text = data["promptedText"][0]
typed_text = data.get("typedText", [""])[0]
start_time = float(data["startTime"][0])
end_time = float(data["endTime"][0])
return [typing.wpm(typed_text, end_time - start_time),
typing.accuracy(typed_text, prompted_text)] | c10b5d681392c71967b86f12d33be3edc1361446 | 16,802 |
from typing import IO
def write_file(filename: str, content: str, mode: str = "w") -> IO:
"""Save content to a file, overwriting it by default."""
with open(filename, mode) as file:
file.write(content)
return file | 5d6b7ac1f9097d00ae2b67e3d34f1135c4e90946 | 16,803 |
def get_minimum_integer_attribute_value(node, attribute_name):
"""
Returns the minimum value that a specific integer attribute has set
:param node: str
:param attribute_name: str
:return: float
"""
return maya.cmds.attributeQuery(attribute_name, min=True, node=node)[0] | ce36c252478e9cb5d5e5ade3e2d70716d206748a | 16,804 |
import numpy as np
import yt
def get_star_locs(plotfile):
"""Given a plotfile, return the location of the primary and the secondary."""
ds = yt.load(plotfile)
# Get a numpy array corresponding to the density.
problo = ds.domain_left_edge.v
probhi = ds.domain_right_edge.v
dim = ds.domain_dimensions
dx = (probhi - problo) / dim
dens = (ds.covering_grid(level=0, left_edge=[0.0, 0.0, 0.0], dims=ds.domain_dimensions)['density']).v
    # Read the rotational period from the job info file.
    rot_period = None
    f = open(plotfile + '/job_info', 'r')
    for line in f:
        if line.find("rotational_period") > 0:
            rot_period = float(line.split("= ")[1])
            break
    f.close()
    # Calculate the orbital parameters.
    M_solar = 1.99e33
    Gconst = 6.67e-8
    M_P = 0.90
    M_S = 0.60
    M_P = M_P * M_solar
    M_S = M_S * M_solar
    # Semi-major axes for a circular orbit with the given period (Kepler's third law).
    a = (Gconst * (M_P + M_S) * rot_period**2 / (4.0 * np.pi**2))**(1.0/3.0)
    a_2 = a / (1 + M_S / M_P)
    a_1 = (M_S / M_P) * a_2
    # Guess the locations of the stars based on perfect circular rotation.
    t = (ds.current_time).v
    center = (probhi + problo) / 2.0
    loc_P = [-a_1 * np.cos(2 * np.pi * t / rot_period) + center[0], -a_1 * np.sin(2 * np.pi * t / rot_period) + center[1], 0.0 + center[2]]
    loc_S = [ a_2 * np.cos(2 * np.pi * t / rot_period) + center[0],  a_2 * np.sin(2 * np.pi * t / rot_period) + center[1], 0.0 + center[2]]
loc_P = np.array(loc_P)
loc_S = np.array(loc_S)
# Create an array of the zone positions
x = problo[0] + dx[0] * (np.arange(dim[0]) + 0.5e0)
y = problo[1] + dx[1] * (np.arange(dim[1]) + 0.5e0)
z = problo[2] + dx[2] * (np.arange(dim[2]) + 0.5e0)
xx, yy, zz = np.meshgrid(x, y, z, indexing="ij")
rr = (xx**2 + yy**2 + zz**2)**0.5
# Now what we'll do is to split up the grid into two parts.
# zones that are closer to the primary's expected location and
# zones that are closer to the secondary's expected location.
rr_P = ( (xx - loc_P[0])**2 + (yy - loc_P[1])**2 + (zz - loc_P[2])**2 )**0.5
rr_S = ( (xx - loc_S[0])**2 + (yy - loc_S[1])**2 + (zz - loc_S[2])**2 )**0.5
P_idx = np.where( rr_P < rr_S )
S_idx = np.where( rr_S < rr_P )
# Now, do a center of mass sum on each star.
xx_P_com = np.sum( dens[P_idx] * xx[P_idx] ) / np.sum(dens[P_idx])
yy_P_com = np.sum( dens[P_idx] * yy[P_idx] ) / np.sum(dens[P_idx])
zz_P_com = np.sum( dens[P_idx] * zz[P_idx] ) / np.sum(dens[P_idx])
xx_S_com = np.sum( dens[S_idx] * xx[S_idx] ) / np.sum(dens[S_idx])
yy_S_com = np.sum( dens[S_idx] * yy[S_idx] ) / np.sum(dens[S_idx])
zz_S_com = np.sum( dens[S_idx] * zz[S_idx] ) / np.sum(dens[S_idx])
return [xx_P_com, yy_P_com, zz_P_com, xx_S_com, yy_S_com, zz_S_com] | 429758abd92d4eff7a1948278bbe8c348ba83862 | 16,805 |
def get_list(_list, persistent_attributes):
"""
    Check if the user supplied a list and whether it is a custom list; also check for any saved lists
    :param _list: User supplied list
    :param persistent_attributes: The persistent attributes from the app
    :return: The list name, and whether the list is custom or not
"""
if _list is not None and (_list.lower() != 'watchlist' and _list.lower() != 'watch list'):
return _list, True
else:
        # if a default isn't set, use the watchlist
if "list" in persistent_attributes:
if persistent_attributes["list"] != 'watchlist' and persistent_attributes["list"] != 'watch list':
_list = persistent_attributes["list"]
_usecustomlist = True
else:
_list = 'watchlist'
_usecustomlist = False
else:
_list = 'watchlist'
_usecustomlist = False
return _list, _usecustomlist | 497fa8427660bafa3cc3023abf0132973693dc6e | 16,806 |
import socket
import re
def inode_for_pid_sock(pid, addr, port):
"""
Given a pid that is inside a network namespace, and the address/port of a LISTEN socket,
find the inode of the socket regardless of which pid in the ns it's attached to.
"""
expected_laddr = '%02X%02X%02X%02X:%04X' % (addr[3], addr[2], addr[1], addr[0], socket.htons(port))
for line in open('/proc/{}/net/tcp'.format(pid), 'r').readlines():
parts = re.split(r'\s+', line.strip())
local_addr = parts[1]
remote_addr = parts[2]
if remote_addr != '00000000:0000': continue # not a listen socket
if local_addr == expected_laddr:
return int(parts[9]) | 4d47d9de118caa87854b96bf759a75520b8409cb | 16,807 |
from typing import List
from typing import Tuple
import logging
import numpy as np
def get_edges_from_route_matrix(route_matrix: Matrix) -> List[Tuple]:
"""Returns a list of the edges used in a route according to the route matrix
:param route_matrix: A matrix indicating which edges contain the optimal route
:type route_matrix: Matrix
:return: The row and column for the edge in the matrix
:rtype: Tuple
:yield: List of tuples for each edge connecting two nodes
:rtype: List[Tuple]
"""
def get_first_row(route_matrix):
for row in range(len(route_matrix)):
nodes_in_row = sum(route_matrix[row])
if nodes_in_row == 1:
return row
elif nodes_in_row == 0:
continue
else:
raise ValueError(f'Invalid number of nodes in row: {nodes_in_row}')
def get_next_node_from_row(i, route_matrix):
for j in range(len(route_matrix)):
if route_matrix[i][j] == 1:
return (i, j)
raise ValueError(f"Node {i} is not connected to another node.")
edges = []
route_length = np.sum(route_matrix)
row = get_first_row(route_matrix)
while len(edges) < route_length:
try:
to_node = get_next_node_from_row(row, route_matrix)
row = to_node[1]
edges.append(to_node)
except ValueError:
logging.info('End of open route found.')
# transpose the matrix
route_matrix = [[route_matrix[j][i] for j in range(len(route_matrix))] for i in range(len(route_matrix))]
# reverse the edges
edges = [(edges[-1][1], edges[-1][0])]
row = edges[0][1]
return edges | 32e84bc782cdf3939affa881f0c2cf23ff81eeee | 16,808 |
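A minimal usage sketch: a three-node open route 0 -> 1 -> 2 encoded as a 0/1 route matrix (numpy is imported by the snippet above):

route_matrix = [[0, 1, 0],
                [0, 0, 1],
                [0, 0, 0]]
get_edges_from_route_matrix(route_matrix)   # -> [(0, 1), (1, 2)]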
def nicer(string):
"""
>>> nicer("qjhvhtzxzqqjkmpb")
True
>>> nicer("xxyxx")
True
>>> nicer("uurcxstgmygtbstg")
False
>>> nicer("ieodomkazucvgmuy")
False
"""
pair = False
for i in range(0, len(string) - 3):
for j in range(i + 2, len(string) - 1):
if string[i:i + 2] == string[j:j + 2]:
pair = True
break
if not pair:
return False
for i in range(0, len(string) - 2):
if string[i] == string[i + 2]:
return True
return False | 7c543bbd39730046b1ab3892727cca3a9e027662 | 16,809 |
from typing import Union
def multiple_choice(value: Union[list, str]):
""" Handle a single string or list of strings """
if isinstance(value, list):
# account for this odd [None] value for empty multi-select fields
if value == [None]:
return None
# we use string formatting to handle the possibility that the list contains ints
return ", ".join([f"{val}" for val in value])
return value | aae54f84bc1ccc29ad9ad7ae205e130f66601131 | 16,810 |
import astropy.units as au
def Jnu_vD82(wav):
"""Estimate of ISRF at optical wavelengths by van Dishoeck & Black (1982)
see Fig 1 in Heays et al. (2017)
Parameters
----------
wav : array of float
wavelength in angstrom
Returns
-------
Jnu : array of float
Mean intensity Jnu in cgs units
"""
if wav is not None and not isinstance(wav, au.quantity.Quantity):
wav = (wav*au.angstrom).to(au.angstrom)
else:
wav = wav.to(au.angstrom)
w = wav.value
return 2.44e-16*w**2.7/au.cm**2/au.s/au.Hz | 287dbf88d7a5ba58ca8792cd78ff61393df3aae2 | 16,811 |
def _coexp_ufunc(m0, exp0, m1, exp1):
""" Returns a co-exp couple of couples """
# Implementation for real
if (m0 in numba_float_types) and (m1 in numba_float_types):
def impl(m0, exp0, m1, exp1):
co_m0, co_m1 = m0, m1
d_exp = exp0 - exp1
if m0 == 0.:
exp = exp1
elif m1 == 0.:
exp = exp0
elif (exp1 > exp0):
co_m0 = _exp2_shift(co_m0, d_exp)
exp = exp1
elif (exp0 > exp1):
co_m1 = _exp2_shift(co_m1, -d_exp)
exp = exp0
else: # exp0 == exp1
exp = exp0
return (co_m0, co_m1, exp)
# Implementation for complex
elif (m0 in numba_complex_types) or (m1 in numba_complex_types):
def impl(m0, exp0, m1, exp1):
co_m0, co_m1 = m0, m1
d_exp = exp0 - exp1
if m0 == 0.:
exp = exp1
elif m1 == 0.:
exp = exp0
elif (exp1 > exp0):
co_m0 = (_exp2_shift(co_m0.real, d_exp)
+ 1j * _exp2_shift(co_m0.imag, d_exp))
exp = exp1
elif (exp0 > exp1):
co_m1 = (_exp2_shift(co_m1.real, -d_exp)
+ 1j * _exp2_shift(co_m1.imag, -d_exp))
exp = exp0
else: # exp0 == exp1
exp = exp0
return (co_m0, co_m1, exp)
else:
raise TypingError("datatype not accepted {}{}".format(m0, m1))
return impl | 11df0f4c06edb758945b7a86940edd4975c47c85 | 16,812 |
def get_lorem(length=None, **kwargs):
""" Get a text (based on lorem ipsum.
:return str:
::
print get_lorem() # -> atque rerum et aut reiciendis...
"""
lorem = ' '.join(g.get_choices(LOREM_CHOICES))
if length:
lorem = lorem[:length]
return lorem | a3ece5c011d69e0a532bcb4b91fa6583dd028c1d | 16,813 |
import warnings
def try_get_graphql_scalar_type(property_name, property_type_id):
"""Return the matching GraphQLScalarType for the property type id or None if none exists."""
maybe_graphql_type = ORIENTDB_TO_GRAPHQL_SCALARS.get(property_type_id, None)
if not maybe_graphql_type:
warnings.warn(
'Ignoring property "{}" with unsupported property type: '
"{}".format(property_name, PROPERTY_TYPE_ID_TO_NAME[property_type_id])
)
return maybe_graphql_type | 70c4406b9cd08b3de6e48a473e62869470f579b1 | 16,814 |
import requests
def get(path):
"""Get GCE metadata value."""
attribute_url = (
'http://{}/computeMetadata/v1/'.format(_METADATA_SERVER) + path)
headers = {'Metadata-Flavor': 'Google'}
operations_timeout = environment.get_value('URL_BLOCKING_OPERATIONS_TIMEOUT')
response = requests.get(
attribute_url, headers=headers, timeout=operations_timeout)
response.raise_for_status()
return response.text | 044db931369de13e6c16db9007fe4bad28a940a8 | 16,815 |
def greedy_helper(hyper_list, node_dict, fib_heap, total_weight, weight=None):
"""
Greedy peeling algorithm. Peel nodes iteratively based on their current degree.
    Parameters
    ----------
    hyper_list: list of hyperedges, each a collection of node ids.
    node_dict: dict, node id as key, tuple (incident hyperedge index list, heap node) as value. Here heap node is a
        pointer to the corresponding node in fib_heap.
    fib_heap: FibonacciHeap, supports fast extraction of the min degree node and key changes.
    total_weight: edge weight sum.
    weight: str that specifies the edge attribute name of edge weight; None if the graph is unweighted.
Returns
----------
H: list, subset of nodes corresponding to densest subgraph.
max_avg: float, density of H induced subgraph.
new_loads: dict, new loads for nodes, only used for the flowless algorithm when T>1.
"""
n = len(node_dict.keys())
avg_degree = total_weight / n
H = list(node_dict.keys())
max_avg = avg_degree
new_loads = dict()
for i in range(n - 1):
# find min node from graph (remove from heap)
to_remove = fib_heap.extract_min()
node_to_remove = to_remove.value
degree_to_remove = to_remove.key
new_loads[node_to_remove] = degree_to_remove
for e_index in node_dict[node_to_remove][0]:
e = hyper_list[e_index]
for neighbor in e:
if neighbor != node_to_remove:
fib_heap.decrease_key(node_dict[neighbor][1], node_dict[neighbor][1].key - 1)
node_dict[neighbor][0].remove(e_index)
total_weight -= 1
del node_dict[node_to_remove]
avg_degree = total_weight / (n - i - 1)
if max_avg < avg_degree:
max_avg = avg_degree
H = list(node_dict.keys())
return H, max_avg, new_loads | b2c0f3e91e6c9a80a8396dc104abc804af8875e5 | 16,816 |
def CleanFloat(number, locale = 'en'):
"""\
Return number without decimal points if .0, otherwise with .x)
"""
try:
if number % 1 == 0:
return str(int(number))
else:
return str(float(number))
    except (TypeError, ValueError):
return number | 03ccc3bfe407becf047515b618621058acff37e7 | 16,817 |
def ssd_bboxes_encode(boxes):
"""
Labels anchors with ground truth inputs.
Args:
boxex: ground truth with shape [N, 5], for each row, it stores [y, x, h, w, cls].
Returns:
gt_loc: location ground truth with shape [num_anchors, 4].
gt_label: class ground truth with shape [num_anchors, 1].
num_matched_boxes: number of positives in an image.
"""
def jaccard_with_anchors(bbox):
"""Compute jaccard score a box and the anchors."""
# Intersection bbox and volume.
ymin = np.maximum(y1, bbox[0])
xmin = np.maximum(x1, bbox[1])
ymax = np.minimum(y2, bbox[2])
xmax = np.minimum(x2, bbox[3])
w = np.maximum(xmax - xmin, 0.)
h = np.maximum(ymax - ymin, 0.)
# Volumes.
inter_vol = h * w
union_vol = vol_anchors + (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]) - inter_vol
jaccard = inter_vol / union_vol
return np.squeeze(jaccard)
pre_scores = np.zeros((config.num_ssd_boxes), dtype=np.float32)
t_boxes = np.zeros((config.num_ssd_boxes, 4), dtype=np.float32)
t_label = np.zeros((config.num_ssd_boxes), dtype=np.int64)
for bbox in boxes:
label = int(bbox[4])
scores = jaccard_with_anchors(bbox)
idx = np.argmax(scores)
scores[idx] = 2.0
mask = (scores > matching_threshold)
mask = mask & (scores > pre_scores)
pre_scores = np.maximum(pre_scores, scores * mask)
t_label = mask * label + (1 - mask) * t_label
for i in range(4):
t_boxes[:, i] = mask * bbox[i] + (1 - mask) * t_boxes[:, i]
index = np.nonzero(t_label)
# Transform to tlbr.
bboxes = np.zeros((config.num_ssd_boxes, 4), dtype=np.float32)
bboxes[:, [0, 1]] = (t_boxes[:, [0, 1]] + t_boxes[:, [2, 3]]) / 2
bboxes[:, [2, 3]] = t_boxes[:, [2, 3]] - t_boxes[:, [0, 1]]
# Encode features.
bboxes_t = bboxes[index]
default_boxes_t = default_boxes[index]
bboxes_t[:, :2] = (bboxes_t[:, :2] - default_boxes_t[:, :2]) / (default_boxes_t[:, 2:] * config.prior_scaling[0])
tmp = np.maximum(bboxes_t[:, 2:4] / default_boxes_t[:, 2:4], 0.000001)
bboxes_t[:, 2:4] = np.log(tmp) / config.prior_scaling[1]
bboxes[index] = bboxes_t
num_match = np.array([len(np.nonzero(t_label)[0])], dtype=np.int32)
return bboxes, t_label.astype(np.int32), num_match | 1e0a07c1305fe2b1ba99f535609d2d52d72befa8 | 16,818 |
def _get_partial_prediction(input_data: dt.BatchedTrainTocopoData,
target_data_token_ids: dt.NDArrayIntBO,
target_data_is_target_copy: dt.NDArrayBoolBOV,
target_data_is_target_pointer: dt.NDArrayBoolBOV
) -> dt.BatchedTrainTocopoData:
"""Create BatchedTrainTocopoData that contains the latest predictions.
This function creates BatchedTrainTocopoData for the autoregressive
prediction. The returned batched_partial_prediction contains the prediction
made so far by the autoregressive prediction, notebly
BatchedTrainTocopoTargetData.token_ids,
BatchedTrainTocopoTargetData.is_target_copy and
BatchedTrainTocopoTargetData.is_target_pointer. batched_partial_prediction
should be used by the autoregressive prediction to generate the next
prediction.
Args:
input_data: The input data that we generate the autoregressive prediction.
We used it copy the BatchedTrainGraphNodeData and
BatchedTrainGraphEdgeData. But BatchedTrainTocopoTargetData should not be
copied from the input data since it contains the ground truth.
target_data_token_ids: Token ids that the autoregressive prediction
predicted so far.
target_data_is_target_copy: is_target_copy matrix that the
autoregressive prediction predicted so far.
target_data_is_target_pointer: is_target_pointer that the
autoregressive prediction predicted so far.
Returns:
A instance of BatchedTrainTocopoData, where the BatchedTrainGraphNodeData
and BatchedTrainGraphEdgeData is the same as input_data. But
BatchedTrainTocopoTargetData holds the prediction made so far.
"""
# BatchedTrainTocopoTargetData contains the latest prediction.
# We must not copy from input_data, but rather use the target_data_token_ids,
# target_data_is_target_copy and target_data_is_target_pointer that are
# predicted by the autoregressive prediction.
batched_partial_prediction_tocopo_target_data = (
dt.BatchedTrainTocopoTargetData(
token_ids=target_data_token_ids,
is_target_copy=target_data_is_target_copy,
is_target_pointer=target_data_is_target_pointer))
# BatchedTrainGraphNodeData and BatchedTrainGraphEdgeData is the same as the
# input_data.
batched_partial_prediction_graph_node_data = dt.BatchedTrainGraphNodeData(
token_ids=input_data.node_data.token_ids,
type_ids=input_data.node_data.type_ids,
token_positions=input_data.node_data.token_positions,
pointer_candidates=input_data.node_data.pointer_candidates
)
batched_partial_prediction_graph_edge_data = dt.BatchedTrainGraphEdgeData(
edges=input_data.edge_data.edges,
time_edges=input_data.edge_data.time_edges)
batched_partial_prediction = dt.BatchedTrainTocopoData(
node_data=batched_partial_prediction_graph_node_data,
edge_data=batched_partial_prediction_graph_edge_data,
target_data=batched_partial_prediction_tocopo_target_data
)
return batched_partial_prediction | 1a0fdc53e4e49bf3d0c0824eca6ba381d7a72f1f | 16,819 |
from tqdm import tqdm
def get_energy_spectrum_old(udata, x0=0, x1=None, y0=0, y1=None,
z0=0, z1=None, dx=None, dy=None, dz=None, nkout=None,
window=None, correct_signal_loss=True, remove_undersampled_region=True,
cc=1.75, notebook=True):
"""
DEPRECATED: TM cleaned up the code, and improved the literacy and transparency of the algorithm- TM (Sep 2020)
Returns 1D energy spectrum from velocity field data
... The algorithm implemented in this function is VERY QUICK because it does not use the two-point autorcorrelation tensor.
... Instead, it converts u(kx, ky, kz)u*(kx, ky, kz) into u(kr)u*(kr). (here * dentoes the complex conjugate)
... CAUTION: Must provide udata with aspect ratio ~ 1
...... The conversion process induces unnecessary error IF the dimension of u(kx, ky, kz) is skewed.
...... i.e. Make udata.shape like (800, 800), (1024, 1024), (512, 512) for accurate results.
... KNOWN ISSUES:
...... This function returns a bad result for udata with shape like (800, 800, 2)
Parameters
----------
udata: nd array
epsilon: nd array or float, default: None
dissipation rate used for scaling energy spectrum
If not given, it uses the values estimated using the rate-of-strain tensor
nu: flaot, viscosity
    x0: int
        index to specify a portion of data in which autocorrelation function is computed. Use data u[y0:y1, x0:x1, t0:t1].
    x1: int
        index to specify a portion of data in which autocorrelation function is computed. Use data u[y0:y1, x0:x1, t0:t1].
    y0: int
        index to specify a portion of data in which autocorrelation function is computed. Use data u[y0:y1, x0:x1, t0:t1].
    y1: int
        index to specify a portion of data in which autocorrelation function is computed. Use data u[y0:y1, x0:x1, t0:t1].
    t0: int
        index to specify a portion of data in which autocorrelation function is computed. Use data u[y0:y1, x0:x1, t0:t1].
    t1: int
        index to specify a portion of data in which autocorrelation function is computed. Use data u[y0:y1, x0:x1, t0:t1].
dx: float
spacing in x
dy: float
spacing in y
dz: float
spacing in z
nkout: int, default: None
number of bins to compute energy/dissipation spectrum
notebook: bool, default: True
Use tqdm.tqdm_notebook if True. Use tqdm.tqdm otherwise
window: str
Windowing reduces undesirable effects due to the discreteness of the data.
A wideband window such as 'flattop' is recommended for turbulent energy spectra.
For the type of applying window function, choose from below:
boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen, bohman, blackmanharris, nuttall, barthann,
kaiser (needs beta), gaussian (needs standard deviation), general_gaussian (needs power, width),
slepian (needs width), chebwin (needs attenuation), exponential (needs decay scale),
tukey (needs taper fraction)
correct_signal_loss: bool, default: True
If True, it would compensate for the loss of the signals due to windowing.
Always recommended to obtain accurate spectral densities.
remove_undersampled_region: bool, default: True
If True, it will not sample the region with less statistics.
cc: float, default: 1.75
A numerical factor to compensate for the signal loss due to approximations.
... cc=1.75 was obtained from the JHTD data.
Returns
-------
e_k: numpy array
Energy spectrum with shape (number of data points, duration)
e_k_err: numpy array
Energy spectrum error with shape (number of data points, duration)
kk: numpy array
Wavenumber with shape (number of data points, duration)
"""
print('get_energy_spectrum_old(): is DEPRECATED since 09/01/20')
print('... Still works perfectly. Yet, TM highly recommends to use the updated function: get_energy_spectrum()')
if notebook:
print('Using tqdm_notebook. If this is a mistake, set notebook=False')
    else:
        pass  # plain tqdm is used outside notebooks

    def delete_masked_elements(data, mask):
"""
Deletes elements of data using mask, and returns a 1d array
Parameters
----------
data: N-d array
mask: N-d array, bool
Returns
-------
compressed_data
"""
data_masked = ma.array(data, mask=mask)
compressed_data = data_masked.compressed()
        # ...Reduced data using a given mask
return compressed_data
def convert_nd_spec_to_1d(e_ks, ks, nkout=None, cc=1.75):
"""
Convert the results of get_energy_spectrum_nd() into a 1D spectrum
... This is actually a tricky problem.
        Importantly, this will output the SPECTRAL DENSITY,
            not the power, which is the integrated spectral density (i.e. spectral density * delta_kx * delta_ky * delta_kz).
... Ask Takumi for derivation. The derivation goes like this.
...... 1. Start with the Parseval's theorem.
...... 2. Write the discretized equation about the TKE: Average TKE = sum deltak * E(k)
...... 3. Using 1, write down the avg TKE
...... 4. Equate 2 and 3. You get e_k1d * jacobian / (n_samples * deltak)
...... IF deltak = deltakr where deltakr = np.sqrt(deltakx**2 + deltaky**2) for 2D
...... where e_k1d is just a histogram value obtained from the DFT result (i.e. POWER- spectral density integrated over a px)
...... 5. Finally, convert this into the SPECTRAL DENSITY. This is two-fold.
...... 5.1.
...... e_k1d * jacobian / (n_samples * deltak) is not necessarily the correct density
...... if deltak is not equal to deltakr.
...... This is because e_k1d comes from the histogram of the input velocity field.
...... One can show that the correction is just (deltak / deltakr) ** dim
...... 5.2
...... After 5.1, this is finally the integrated power between k and k + deltak
...... Now divide this by deltak to get the spectral density.
Parameters
----------
e_ks
ks
nkout
d: int/float, DIMENSION OF THE FLOW (NOT DIMENSION OF AVAILABLE VELOCITY FIELD)
... For 3D turbulence, d = 3
... d is equal to 3 even if udata is an 2D field embedded in an actual 3D field,
... For 2D turbulence, d = 2
Returns
-------
"""
dim = ks.shape[0]
duration = e_ks.shape[-1]
if dim == 2:
deltakx, deltaky = ks[0, 0, 1] - ks[0, 0, 0], \
ks[1, 1, 0] - ks[1, 0, 0]
e_ks *= deltakx * deltaky # use the raw DFT outputs (power=integrated density over a px)
deltakr = np.sqrt(deltakx ** 2 + deltaky ** 2) # radial k spacing of the velocity field
dx, dy = 2.*np.pi / ks[0, 0, 0] * -0.5, 2.*np.pi / ks[1, 0, 0] * -0.5
if dim == 3:
deltakx, deltaky, deltakz = ks[0, 0, 1, 0] - ks[0, 0, 0, 0], \
ks[1, 1, 0, 0] - ks[1, 0, 0, 0], \
ks[2, 0, 0, 1] - ks[2, 0, 0, 0]
e_ks *= deltakx * deltaky * deltakz # use the raw DFT outputs (power=integrated density over a px)
deltakr = np.sqrt(deltakx ** 2 + deltaky ** 2 + deltakz ** 2) # radial k spacing of the velocity field
dx, dy, dz = 2.*np.pi / ks[0, 0, 0] * -0.5, 2.*np.pi / ks[1, 0, 0] * -0.5, 2.*np.pi / ks[2, 0, 0] * -0.5
kk = np.zeros((ks.shape[1:]))
for i in range(dim):
kk += ks[i, ...] ** 2
kk = np.sqrt(kk) # radial k
if nkout is None:
nkout = int(np.max(ks.shape[1:]) * 0.8)
shape = (nkout, duration)
e_k1ds = np.empty(shape)
e_k1d_errs = np.empty(shape)
k1ds = np.empty(shape)
if remove_undersampled_region:
kx_max, ky_max = np.nanmax(ks[0, ...]), np.nanmax(ks[1, ...])
k_max = np.nanmin([kx_max, ky_max])
if dim == 3:
kz_max = np.nanmax(ks[2, ...])
k_max = np.nanmin([k_max, kz_max])
for t in range(duration):
            # flatten arrays to feed to binned_statistic
kk_flatten, e_knd_flatten = kk.flatten(), e_ks[..., t].flatten()
if remove_undersampled_region:
mask = np.abs(kk_flatten) > k_max
kk_flatten = delete_masked_elements(kk_flatten, mask)
e_knd_flatten = delete_masked_elements(e_knd_flatten, mask)
# get a histogram
k_means, k_edges, binnumber = binned_statistic(kk_flatten, kk_flatten, statistic='mean', bins=nkout)
k_binwidth = (k_edges[1] - k_edges[0])
k1d = k_edges[1:] - k_binwidth / 2
e_k1d, _, _ = binned_statistic(kk_flatten, e_knd_flatten, statistic='mean', bins=nkout)
e_k1d_err, _, _ = binned_statistic(kk_flatten, e_knd_flatten, statistic='std', bins=nkout)
# # WEIGHTED AVERAGE
# ke_k1d, _, _ = binned_statistic(kk_flatten, kk_flatten * e_knd_flatten, statistic='mean', bins=nkout)
# e_k1d = ke_k1d / k1d
# ke_k1d_err, _, _ = binned_statistic(kk_flatten, kk_flatten * e_knd_flatten, statistic='std', bins=nkout)
# e_k1d_err = ke_k1d_err / k1d
# One must fix the power by some numerical factor due to the DFT and the definition of E(k)
n_samples = len(kk_flatten)
deltak = k1d[1] - k1d[0]
if dim == 2:
jacobian = 2 * np.pi * k1d
elif dim == 3:
jacobian = 4 * np.pi * k1d ** 2
# Insert to a big array
# ... A quick derivation of this math is given in the docstring.
k1ds[..., t] = k1d
# OLD stuff
# e_k1ds[..., t] = e_k1d * jacobian / (n_samples * deltak)
# e_k1d_errs[..., t] = e_k1d_err * jacobian / (n_samples * deltak)
# print deltak
# Old stuff 2: scaling that works?
# e_k1ds[..., t] = e_k1d * jacobian / (n_samples * deltak) * (deltak / deltakr) ** dim / deltak
# e_k1d_errs[..., t] = e_k1d_err * jacobian / (n_samples * deltak) * (deltak / deltakr) ** dim / deltak
# print(dx, dy, deltakr, deltakx * dx * ks.shape[2])
print(deltakr, deltak)
# 2019-2020 August
# e_k1ds[..., t] = e_k1d * jacobian / (n_samples * deltakr ** 2) * cc
# e_k1d_errs[..., t] = e_k1d_err * jacobian / (n_samples * deltakr ** 2) * cc
# # Update in Aug, 2020- TM
e_k1ds[..., t] = e_k1d * jacobian / (n_samples * deltakr ** 2) * cc
e_k1d_errs[..., t] = e_k1d_err * jacobian / (n_samples * deltakr ** 2) * cc
return e_k1ds, e_k1d_errs, k1ds
dim, duration = len(udata), udata.shape[-1]
e_ks, ks = get_energy_spectrum_nd_old(udata, x0=x0, x1=x1, y0=y0, y1=y1, z0=z0, z1=z1, dx=dx, dy=dy, dz=dz,
window=window, correct_signal_loss=correct_signal_loss)
e_k, e_k_err, kk = convert_nd_spec_to_1d(e_ks, ks, nkout=nkout, cc=cc)
# #### NORMALIZATION IS NO LONGER NEEDED #### - Takumi, Apr 2019
# # normalization
# energy_avg, energy_avg_err = get_spatial_avg_energy(udata, x0=x0, x1=x1, y0=y0, y1=y1, z0=z0, z1=z1)
#
# for t in range(duration):
# I = np.trapz(e_k[0:, t], kk[0:, t])
# print I
# N = I / energy_avg[t] # normalizing factor
# e_k[:, t] /= N
# e_k_err[:, t] /= N
    return e_k, e_k_err, kk | aa29358215897f3bcb630d2c62b679d2b6ebef88 | 16,820
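# Hedged usage sketch for get_energy_spectrum_old(): the velocity field below is synthetic random
# data and dx, dy are made-up spacings, purely to show the expected call shape. Running it for
# real requires the rest of the module (notably get_energy_spectrum_nd_old), which is not part of
# this snippet.
import numpy as np

udata = np.random.randn(2, 256, 256, 1)          # (component, y, x, time)
e_k, e_k_err, kk = get_energy_spectrum_old(udata, dx=1.0, dy=1.0,
                                           window='flattop', notebook=False)
print(e_k.shape, kk.shape)                       # (nkout, duration) for each output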
def createDefaultClasses(datasetTXT):
"""
:param datasetTXT: dict with text from txt files indexed by filename
:return: Dict with key:filename, value:list of lists with classes per sentence in the document
"""
classesDict = {}
for fileName in datasetTXT:
classesDict[fileName] = []
sentences = nltkSentenceSplit(datasetTXT[fileName], verbose=False)
for sentence in sentences:
sentence = nltkTokenize(sentence)
classesDict[fileName].append([int(0) for _ in sentence])
return classesDict | 8bec5768710a929c21f75fa70865e25f340409f6 | 16,821 |
def getGlobals():
"""
:return: (dict)
"""
return globals() | 0fa230d341ba5435b33c9e6a9d9f793f99a74238 | 16,822 |
from typing import Iterable
from typing import List
def split_text_to_words(words: Iterable[str]) -> List[Word]:
"""Transform split text into list of Word."""
return [Word(word, len(word)) for word in words] | 6317e794a5397da44be96216308573ae9d5a788f | 16,823 |
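# Hedged usage sketch: `Word` is not defined in the snippet above, so it is assumed here to be a
# small NamedTuple holding the token text and its length.
from typing import NamedTuple

class Word(NamedTuple):
    text: str
    length: int

print(split_text_to_words("hang loose".split()))
# [Word(text='hang', length=4), Word(text='loose', length=5)]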
import khorosjx
def init_module_operation():
"""This function imports the primary modules for the package and returns ``True`` when successful."""
khorosjx.init_module('admin', 'content', 'groups', 'spaces', 'users')
return True | d6cbc3b94d4b4005d301d9b597bb7086e211bfa2 | 16,824 |
def connect_to_rds(aws, region):
"""
Return boto connection to the RDS in the specified environment's region.
"""
set_progress('Connecting to AWS RDS in region {0}.'.format(region))
wrapper = aws.get_api_wrapper()
client = wrapper.get_boto3_client(
'rds',
aws.serviceaccount,
aws.servicepasswd,
region
)
return client | cdfaa984c6795c7e03f0d8b3e3620f6de757fcbb | 16,825 |
def export_graphviz(DecisionTreeClassificationModel, featureNames=None, categoryNames=None, classNames=None,
filled=True, roundedCorners=True, roundLeaves=True):
"""
Generates a DOT string out of a Spark's fitted DecisionTreeClassificationModel, which
can be drawn with any library capable of handling the DOT format.
If you want to plot in a single step, please use the function plot_tree().
Arguments:
DecisionTreeClassificationModel -- a pyspark.ml.classification.DecisionTreeClassificationModel
instance
featureNames -- a list with the feature names. This
is probably the same list you usually
pass to your VectorAssembler constructor
categoryNames -- a dictionary with the featureNames that
are categorical as the keys, and the
different categories as the values.
This is probably the featureNames as key,
StringIndexerModel.labels attribute as value
for each categorical feature
classNames -- a list with the class names for your target
column. This is probably the StringIndexerModel.labels
for your target column
filled -- boolean which indicates whether to fill nodes with colour
or not. Color gamma will be the prediction class for each
node, and color intensity the impurity at such node
roundedCorners -- boolean which indicates whether to round
rectangle corners for the nodes
roundLeaves -- boolean which indicates whether to represent leaf
nodes as ellipses rather than rectangles
Returns:
a DOT string ready to be processed by any DOT handling library
"""
tree_dict = loads(generate_tree_json(DecisionTreeClassificationModel, withNodeIDs=False))
num_classes = get_num_classes(tree_dict)
color_brew = generate_color_brew(num_classes)
node_list = []
tree_dict_with_id = add_node_ids(tree_dict)
graph = relations_to_str(tree_dict_with_id,
featureNames=featureNames,
categoryNames=categoryNames,
classNames=classNames,
numClasses=num_classes,
nodeList=node_list,
filled=filled,
roundLeaves=roundLeaves,
color_brew=color_brew)
node_properties = "\n".join(node_list)
filled_and_rounded = []
if filled:
filled_and_rounded.append("filled")
if roundedCorners:
filled_and_rounded.append("rounded")
dot_string = """digraph Tree {
node [shape=box style="%s"]
subgraph body {
%s
%s}
}""" % (",".join(filled_and_rounded), "".join(graph), node_properties)
return dot_string | eb4484136fbbe92537a3f030375f6ac80081befd | 16,826 |
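# Hedged usage sketch: assumes a pyspark DecisionTreeClassificationModel `model` was fitted
# elsewhere (e.g. with pyspark.ml.classification.DecisionTreeClassifier); the feature and class
# names are illustrative placeholders, not values taken from the snippet.
dot_string = export_graphviz(model,
                             featureNames=["age", "income"],
                             classNames=["no", "yes"],
                             filled=True)
with open("tree.dot", "w") as f:
    f.write(dot_string)  # render with e.g. `dot -Tpng tree.dot -o tree.png`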
import sqlalchemy
def _get_next_sequence_values(session, base_mapper, num_values):
"""Fetches the next `num_values` ids from the `id` sequence on the `base_mapper` table.
For example, if the next id in the `model_id_seq` sequence is 12, then
`_get_next_sequence_values(session, Model.__mapper__, 5)` will return [12, 13, 14, 15, 16].
"""
assert _has_normal_id_primary_key(
base_mapper
), "_get_next_sequence_values assumes that the sequence produces integer values"
id_seq_name = _get_id_sequence_name(base_mapper)
# Table.schema is the canonical place to get the name of the schema.
# See https://docs.sqlalchemy.org/en/13/core/metadata.html#sqlalchemy.schema.Table.params.schema
schema = base_mapper.entity.__table__.schema
sequence = sqlalchemy.Sequence(id_seq_name, schema=schema)
# Select the next num_values from `sequence`
raw_ids = tuples_to_scalar_list(
session.connection().execute(
sqlalchemy.select([sequence.next_value()]).select_from(
sqlalchemy.text("generate_series(1, :num_values)")
),
{"num_values": num_values},
)
)
assert len(raw_ids) == num_values, u"Expected to get {} new ids, instead got {}".format(
num_values, len(raw_ids)
)
# session.execute returns `long`s since Postgres sequences use `bigint` by default.
# However, we need ints since the column type for our primary key is `integer`.
return [int(id_) for id_ in raw_ids] | 63ad9e5e55228dd873ee2c5d9080d223c89e1bc6 | 16,827 |
def overview(request):
"""
Dashboard: Process overview page.
"""
responses_dict = get_data_for_user(request.user)
responses_dict_by_step = get_step_responses(responses_dict)
# Add step status dictionary
step_status = get_step_completeness(responses_dict_by_step)
responses_dict_by_step['step_status'] = step_status
responses_dict_by_step['active_page'] = 'overview'
responses_dict_by_step['derived'] = get_derived_data(responses_dict)
# Dashnav needs filing option to determine which steps to show
for question in responses_dict_by_step['signing_filing']:
responses_dict_by_step[question['question_id']] = question['value']
response = render(request, 'overview.html', context=responses_dict_by_step)
# set this session variable after the page is already rendered
request.session['viewed_dashboard_during_session'] = True
return response | 4ac165cf5b4bf7de6f060d6649935f25fcf5a0a9 | 16,828 |
def _guess_os():
"""Try to guess the current OS"""
try:
abi_name = ida_typeinf.get_abi_name()
except:
abi_name = ida_nalt.get_abi_name()
if "OSX" == abi_name:
return "macos"
inf = ida_idaapi.get_inf_structure()
file_type = inf.filetype
if file_type in (ida_ida.f_ELF, ida_ida.f_AOUT, ida_ida.f_COFF):
return "linux"
elif file_type == ida_ida.f_MACHO:
return "macos"
elif file_type in (
ida_ida.f_PE,
ida_ida.f_EXE,
ida_ida.f_EXE_old,
ida_ida.f_COM,
ida_ida.f_COM_old,
):
return "windows"
else:
# Default
return "linux"
#raise UnhandledOSException("Unrecognized OS type") | bb2cb2f0c294f2554ec419ee1bdea665abaf6957 | 16,829 |
def create_conf(name, address, *services):
"""Create an Apple TV configuration."""
atv = conf.AppleTV(name, address)
for service in services:
atv.add_service(service)
return atv | 0326a4c21b39ef12fe916f3a3fbee34af52c12a2 | 16,830 |
import numpy as np
def log_transform(x):
""" Log transformation from total precipitation in mm/day"""
tp_max = 23.40308390557766
y = np.log(x*(np.e-1)/tp_max + 1)
return y | 61783d103db36ed668e494f557550caef611b84a | 16,831 |
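# Hedged companion sketch: the inverse mapping, obtained by solving y = log(x*(e-1)/tp_max + 1)
# for x, with the same tp_max constant as above.
import numpy as np

def inverse_log_transform(y):
    """Map transformed values back to total precipitation in mm/day."""
    tp_max = 23.40308390557766
    return (np.exp(y) - 1) * tp_max / (np.e - 1)

x = 10.0
print(np.isclose(inverse_log_transform(log_transform(x)), x))  # True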
from datetime import datetime
import requests
import json
def get_flight(arguments):
"""
    connects to the Skypicker service and gets the most optimal flight based on the search criteria
:param arguments: inputs arguments from parse_arg
:return dict: flight
"""
api_url = 'https://api.skypicker.com/flights?v=3&'
adults = '1'
# convert time format 2018-04-13 -> 13/04/2018
    date = datetime.strptime(arguments.date, "%Y-%m-%d").strftime("%d/%m/%Y")
fly_from = arguments.origin
fly_to = arguments.to
sort = arguments.sort
if arguments.days_in_destination == 'oneway':
# constructing search query for ONEWAY flight
type_flight = 'oneway'
query_string = '&flyFrom=' + fly_from + \
'&to=' + fly_to + \
'&dateFrom=' + date + \
'&dateTo=' + date + \
'&typeFlight=' + type_flight + \
'&adults=' + adults + \
'&sort=' + sort + \
'&asc=1'
else:
# constructing search query for RETURN flight
days_in_destination = arguments.days_in_destination
type_flight = 'round'
query_string = 'daysInDestinationFrom=' + days_in_destination + \
'&daysInDestinationTo=' + days_in_destination + \
'&flyFrom=' + fly_from + \
'&to=' + fly_to + \
'&dateFrom=' + date + \
'&dateTo=' + date + \
'&typeFlight=' + type_flight + \
'&adults=' + adults + \
'&sort=' + sort + \
'&asc=1'
if arguments.verbose:
print(query_string)
get_data = requests.get(api_url + query_string)
json_data = json.loads(get_data.content)
flights = json_data['data']
# return first flight in the sorted list
if arguments.verbose:
print(flights[0])
return flights[0] | 690b7bd170b8b83f4b83f5c0ce98da919134107c | 16,832 |
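# Hedged usage sketch: `arguments` mimics the argparse namespace this function expects; the city
# codes and the 'price'/'flyFrom'/'flyTo' keys of the response are assumptions about the (old)
# Skypicker API, so treat this purely as a shape example.
from types import SimpleNamespace

arguments = SimpleNamespace(date="2018-04-13", origin="PRG", to="LHR",
                            sort="price", days_in_destination="oneway", verbose=False)
cheapest = get_flight(arguments)
print(cheapest.get("price"), cheapest.get("flyFrom"), cheapest.get("flyTo"))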
def use_ip_alt(request):
"""
Fixture that gives back 2 instances of UseIpAddrWrapper
1) use ip4, dont use ip6
2) dont use ip4, use ip6
"""
use_ipv4, use_ipv6 = request.param
return UseIPAddrWrapper(use_ipv4, use_ipv6) | c33d74b6888124413d1430e4873140475db4748e | 16,833 |
import torch
def radius_gaussian(sq_r, sig, eps=1e-9):
"""Compute a radius gaussian (gaussian of distance)
Args:
sq_r: input radiuses [dn, ..., d1, d0]
sig: extents of gaussians [d1, d0] or [d0] or float
Returns:
gaussian of sq_r [dn, ..., d1, d0]
"""
return torch.exp(-sq_r / (2 * sig**2 + eps)) | cd5bb2bb85641b1200ce67cb7eb52bc1705cd0a1 | 16,834 |
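# Usage sketch: turn squared distances into Gaussian weights; sig is a scalar extent here.
import torch

sq_r = torch.rand(4, 16)                 # [d1, d0] squared radiuses
weights = radius_gaussian(sq_r, sig=0.5)
print(weights.shape)                     # torch.Size([4, 16]); values lie in (0, 1]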
from typing import List
from typing import Dict
from typing import Any
def index_papers_to_geodata(papers: List[Paper]) -> Dict[str, Any]:
"""
:param papers: list of Paper
:return: object
"""
geodata = {}
for paper in papers:
for file in paper.all_files():
for location in file.locations.all():
if location.id not in geodata:
geodata[location.id] = {
"id": location.id,
"name": location.description,
"coordinates": location.geometry,
"papers": {},
}
if paper.id not in geodata[location.id]["papers"]:
if paper.paper_type:
paper_type = paper.paper_type.paper_type
else:
paper_type = _("Paper")
geodata[location.id]["papers"][paper.id] = {
"id": paper.id,
"name": paper.name,
"type": paper_type,
"url": reverse("paper", args=[paper.id]),
"files": [],
}
geodata[location.id]["papers"][paper.id]["files"].append(
{
"id": file.id,
"name": file.name,
"url": reverse("file", args=[file.id]),
}
)
return geodata | f892d84e3dc8f239885b5c4110c931b088922bcc | 16,835 |
def _get_all_prefixed_mtds(
prefix: str,
groups: t.Tuple[str, ...],
update_groups_by: t.Optional[t.Union[t.FrozenSet[str],
t.Set[str]]] = None,
prefix_removal: bool = False,
custom_class_: t.Any = None,
) -> t.Dict[str, t.Tuple]:
"""Get all methods prefixed with ``prefix`` in predefined feature ``groups``.
The predefined metafeature groups are inside ``VALID_GROUPS`` attribute.
Args:
prefix (:obj:`str`): gather methods prefixed with this value.
groups (:obj:`Tuple` of :obj:`str`): a tuple of feature group names.
It can assume value :obj:`NoneType`, which is interpreted as ``no
filter`` (i.e. all features of all groups will be returned).
return_groups (:obj:`bool`, optional): if True, then the returned value
will be a :obj:`dict` (instead of a :obj:`tuple`) which maps each
group (as keys) with its correspondent values (as :obj:`tuple`s).
update_groups_by (:obj:`set` of :obj:`str`, optional): values to filter
``groups``. This function also returns a new version of ``groups``
with all its elements that do not contribute with any new method
            for the final output. In other words, any group which does not
            contribute to the output of this function is removed. This is
            particularly useful for precomputations, as it helps avoid
            unnecessary precomputation methods from feature groups not related
            to the user-selected features.
prefix_removal (:obj:`bool`, optional): if True, then the returned
method names will not have the ``prefix``.
custom_class_ (Class, optional): used for inner testing purposes. If
not None, the given class will be used as reference to extract
the prefixed methods.
Returns:
If ``filter_groups_by`` argument is :obj:`NoneType` or empty:
tuple: with all filtered methods by ``group``.
Else:
tuple(tuple, tuple): the first field is the output described above,
the second field is a new version of ``groups``, with all ele-
ments that do not contribute with any element listed in the set
``update_groups_by`` removed.
"""
groups = tuple(set(VALID_GROUPS).intersection(groups))
if not groups and custom_class_ is None:
return {"methods": tuple(), "groups": tuple()}
if custom_class_ is None:
verify_groups = tuple(VALID_GROUPS)
verify_classes = tuple(VALID_MFECLASSES)
else:
verify_groups = ("test_methods", )
verify_classes = (custom_class_, )
methods_by_group = {
ft_type_id: get_prefixed_mtds_from_class(
class_obj=mfe_class,
prefix=prefix,
prefix_removal=prefix_removal)
for ft_type_id, mfe_class in zip(verify_groups, verify_classes)
if ft_type_id in groups or custom_class_ is not None
}
gathered_methods = [] # type: t.List[t.Union[str, TypeMtdTuple]]
new_groups = [] # type: t.List[str]
for group_name in methods_by_group:
group_mtds = methods_by_group[group_name]
gathered_methods += group_mtds
if update_groups_by:
group_mtds_names = {
remove_prefix(mtd_pack[0], prefix=MTF_PREFIX)
if not prefix_removal
else mtd_pack[0]
for mtd_pack in group_mtds
}
if not update_groups_by.isdisjoint(group_mtds_names):
new_groups.append(group_name)
ret_val = {
"methods": tuple(gathered_methods),
} # type: t.Dict[str, t.Tuple]
if update_groups_by:
ret_val["groups"] = tuple(new_groups)
return ret_val | 2387fb3f2aa0416ad9837f6c1b4c27488d406fea | 16,836 |
from functools import reduce
import numpy as np
def cartesian_product(arrays):
"""Create a cartesian product array from a list of arrays.
It is used to create x-y coordinates array from x and y arrays.
Stolen from stackoverflow
http://stackoverflow.com/a/11146645
"""
broadcastable = np.ix_(*arrays)
broadcasted = np.broadcast_arrays(*broadcastable)
rows, cols = reduce(np.multiply, broadcasted[0].shape), len(broadcasted)
out = np.empty(rows * cols, dtype=broadcasted[0].dtype)
start, end = 0, rows
for a in broadcasted:
out[start:end] = a.reshape(-1)
start, end = end, end + rows
return out.reshape(cols, rows).T | 552b898a9187df637cc5f10b49e6a1fe004af95c | 16,838 |
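# Usage sketch: build an (N*M, 2) array of x-y coordinates from two 1-D axes.
import numpy as np

x = np.array([0, 1, 2])
y = np.array([10, 20])
xy = cartesian_product([x, y])
print(xy.shape)  # (6, 2)
print(xy[0])     # [ 0 10]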
def advanced_split(string, *symbols, contain=False, linked='right'):
"""
Split a string by symbols
If contain is True, the result will contain symbols
The choice of linked decides symbols link to which adjacent part of the result
"""
if not isinstance(string, str):
raise Exception('String must be str!')
for each in symbols:
if not isinstance(each, str):
raise Exception('Symbol must be str!')
linked = linked.lower()
if linked not in ['left', 'right']:
raise Exception('Linked must be left or right!')
if not len(symbols):
return [string]
result = []
symbols_len = tuple([len(each) for each in symbols])
if contain:
tail = ''
while 1:
index = len(string)
num = -1
for _num, each in enumerate(symbols):
_index = string.find(each)
if _index < index and _index + 1:
index = _index
num = _num
if num == -1:
temp = tail + string if contain and linked == 'right' and tail else string
if temp:
result.append(temp)
break
temp = string[:index]
if contain and linked == 'left':
tail = symbols[num]
if contain:
if tail:
if linked == 'left':
temp = temp + tail
if linked == 'right':
temp = tail + temp
if contain and linked == 'right':
tail = symbols[num]
string = string[index+symbols_len[num]:]
if temp:
result.append(temp)
return result | 3e46fcc0c3fa6ab99b9d4d45cf950d9ad3f03ac1 | 16,839 |
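# Usage sketch: split on two symbols, without and with keeping the symbols attached to the left part.
print(advanced_split("a,b;c", ",", ";"))                               # ['a', 'b', 'c']
print(advanced_split("a,b;c", ",", ";", contain=True, linked="left"))  # ['a,', 'b;', 'c']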
def _get_resource_info(
resource_type="pod",
labels={},
json_path=".items[0].metadata.name",
errors_to_ignore=("array index out of bounds: index 0",),
verbose=False,
):
"""Runs 'kubectl get <resource_type>' command to retrieve info about this resource.
Args:
resource_type (string): "pod", "service", etc.
labels (dict): (eg. {'name': 'phenotips'})
json_path (string): a json path query string (eg. ".items[0].metadata.name")
errors_to_ignore (list):
verbose (bool):
Returns:
(string) resource value (eg. "postgres-410765475-1vtkn")
"""
l_arg = ""
if labels:
l_arg = "-l" + ",".join(["%s=%s" % (key, value) for key, value in labels.items()])
output = run(
"kubectl get %(resource_type)s %(l_arg)s -o jsonpath={%(json_path)s}" % locals(),
errors_to_ignore=errors_to_ignore,
print_command=False,
verbose=verbose,
)
return output.strip('\n') if output is not None else None | b9a98fe469eb7aa5fcfb606db0948cb53410ddec | 16,840 |
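# Hedged usage sketch: assumes the surrounding deployment script provides the run() helper used
# above and that a pod labelled name=postgres exists; the label and json_path values are illustrative.
pod_name = _get_resource_info(resource_type="pod",
                              labels={"name": "postgres"},
                              json_path=".items[0].metadata.name",
                              verbose=True)
print(pod_name)  # e.g. "postgres-410765475-1vtkn"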
from math import sqrt, cos, sin, pi
def rotate_line_about_point(line, point, degrees):
"""
added 161205
This takes a line and rotates it about a point a certain number of degrees.
For use with clustering veins.
    :param line: tuple containing two pairs of x, y values
:param point: tuple of x, y
:param degrees: number of degrees to rotate by
:return: line (now rotated)
"""
# point will serve as axis
axis = point
# unpack line
p0, p1 = line
# and get the line's degrees and length
line_deg = line_to_angle(line)
d = (abs(p0[0] - p1[0]), abs(p0[1] - p1[1]))
    line_length = sqrt(d[0] ** 2 + d[1] ** 2)
    # calculate radius between points and axis
    d = (abs(p0[0] - axis[0]), abs(p0[1] - axis[1]))
    r0 = sqrt(d[0] ** 2 + d[1] ** 2)
# r1 = float((p1[0] - axis[0]) ^ 2 + (p1[1] - axis[1]) ^ 2) ^ 0.5
# find degrees that first line is above x-axis
p0_deg = line_to_angle((axis, p0))
# now rotate line one to be level to degrees
p0_cos = cos(degrees * (pi / 180.0))
p0_sin = sin(degrees * (pi / 180.0))
p0_n = (r0 * p0_cos, r0 * p0_sin)
# and move p1 to be in respect to p0
new_deg = line_deg - p0_deg
# normalize degrees
while new_deg > 360:
new_deg -= 360
while new_deg < 0:
new_deg += 360
# get second point of line now since all variables are known
p1_cos = cos(new_deg * (pi / 180.0))
p1_sin = sin(new_deg * (pi / 180.0))
# get new p1
p1_n = (p1_cos * line_length + p0_n[0], p1_sin * line_length + p0_n[1])
# return new line
return p0_n, p1_n | c5954604d6f7852e66fe7b19f53193271582619d | 16,841 |
def arith_relop(a, t, b):
"""
arith_relop(a, t, b)
This is (arguably) a hack.
Represents each function as an integer 0..5.
"""
return [(t == 0).implies(a < b),
(t == 1).implies(a <= b),
(t == 2).implies(a == b),
(t == 3).implies(a >= b),
(t == 4).implies(a > b),
(t == 5).implies(a != b)
] | 8b06d545e8d651803683b36facafb647f38fb2ff | 16,842 |
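# Hedged companion sketch: a plain-Python evaluator mirroring the same 0..5 operator encoding,
# useful for checking concrete values outside the constraint model.
import operator

RELOPS = {0: operator.lt, 1: operator.le, 2: operator.eq,
          3: operator.ge, 4: operator.gt, 5: operator.ne}

def eval_relop(a, t, b):
    """Evaluate the relation encoded by t on concrete numbers a and b."""
    return RELOPS[t](a, b)

print(eval_relop(3, 1, 3))  # True  (3 <= 3)
print(eval_relop(3, 4, 3))  # False (3 > 3)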
import logging
def initialise_framework(options):
"""This function initializes the entire framework
:param options: Additional arguments for the component initializer
:type options: `dict`
:return: True if all commands do not fail
:rtype: `bool`
"""
logging.info("Loading framework please wait..")
# No processing required, just list available modules.
if options["list_plugins"]:
show_plugin_list(db, options["list_plugins"])
finish()
target_urls = load_targets(session=db, options=options)
load_works(session=db, target_urls=target_urls, options=options)
start_proxy()
start_transaction_logger()
return True | e62b34189e330fdaea7ec6c81084616bd015a587 | 16,843 |
def get_registration_form() -> ConvertedDocument:
"""
    Return the registration form parameters
    :return: Profile form data + login and password
"""
form = [
gen_field_row('Логин', 'login', 'text', validate_rule='string'),
gen_field_row('Пароль', 'password', 'password'),
gen_field_row('Токен', 'token', 'text', validate_rule='token')
] + convert_mongo_model(Profile)
return form | 76bcab98d840523e94234c456cb1ccbd2b1f9129 | 16,844 |
def get_docker_stats(dut):
"""
Get docker ps
:param dut:
:return:
"""
command = 'docker stats -a --no-stream'
output = st.show(dut, command)
return output | cd994701c622ce9ea1f6f123f24b9913aa02698d | 16,845 |
def enthalpyvap(temp=None,pres=None,dvap=None,chkvals=False,
chktol=_CHKTOL,temp0=None,pres0=None,dvap0=None,chkbnd=False,
mathargs=None):
"""Calculate ice-vapour vapour enthalpy.
Calculate the specific enthalpy of water vapour for ice and water
vapour in equilibrium.
:arg temp: Temperature in K.
:type temp: float or None
:arg pres: Pressure in Pa.
:type pres: float or None
:arg dvap: Water vapour density in kg/m3. If unknown, pass None
(default) and it will be calculated.
:type dvap: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg temp0: Initial guess for the temperature in K. If None
(default) then `_approx_p` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then `_approx_t` is used.
:type pres0: float or None
:arg dvap0: Initial guess for the water vapour density in kg/m3. If
None (default) then `_approx_t` or `_approx_p` is used.
:type dvap0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Enthalpy in J/kg.
:raises ValueError: If neither of temp or pres is provided.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> enthalpyvap(temp=270.)
2495132.21977
>>> enthalpyvap(pres=100.)
2463525.19629
"""
temp, pres, dvap = eq_tp(temp=temp,pres=pres,dvap=dvap,chkvals=chkvals,
chktol=chktol,temp0=temp0,pres0=pres0,dvap0=dvap0,chkbnd=chkbnd,
mathargs=mathargs)
hv = flu2.enthalpy(temp,dvap)
return hv | dadc59bf28272de3a298b89cb13901825fd58c95 | 16,848 |
async def get_eng_hw(module: tuple[str, ...], task: str) -> Message:
"""
    Standard request for English (homework)
"""
return await _get_eng_content('zadanie-{}-m-{}-z'.format(*module), task) | 15e5425173c643074dde08c6753ffcd333414565 | 16,849 |
import json
import numpy as np
def get_image_blobs(pb):
""" Get an image from the sensor connected to the MicroPython board,
find blobs and return the image, a list of blobs, and the time it
took to find the blobs (in [ms])
"""
raw = json.loads(run_on_board(pb, script_get_image, no_print=True))
img = np.flip(np.transpose(np.reshape(raw, (8, 8))))
time_str = run_on_board(pb, script_get_blob_list, no_print=True)
t_ms = float(time_str.split("= ")[1].split("m")[0])
blobs_str = run_on_board(pb, script_print_blob_list, no_print=True)
blobs_str = blobs_str.replace("nan", "0")
blobs = json.loads(blobs_str.replace('(', '[').replace(')', ']'))
return img, blobs, t_ms | 5d563aeb490c5c1d509e442a3f7210bcfd9d6779 | 16,851 |
from collections import defaultdict
import numpy as np
def classification_report(y_true, y_pred, digits=2, suffix=False):
"""Build a text report showing the main classification metrics.
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a classifier.
digits : int. Number of digits for formatting output floating point values.
Returns:
report : string. Text summary of the precision, recall, F1 score for each class.
Examples:
>>> from seqeval.metrics import classification_report
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> print(classification_report(y_true, y_pred))
precision recall f1-score support
<BLANKLINE>
MISC 0.00 0.00 0.00 1
PER 1.00 1.00 1.00 1
<BLANKLINE>
avg / total 0.50 0.50 0.50 2
<BLANKLINE>
"""
true_entities = set(get_entities(y_true, suffix))
pred_entities = set(get_entities(y_pred, suffix))
name_width = 0
d1 = defaultdict(set)
d2 = defaultdict(set)
for e in true_entities:
d1[e[0]].add((e[1], e[2]))
name_width = max(name_width, len(e[0]))
for e in pred_entities:
d2[e[0]].add((e[1], e[2]))
last_line_heading = 'avg / total'
width = max(name_width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)
report = head_fmt.format(u'', *headers, width=width)
report += u'\n\n'
row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\n'
ps, rs, f1s, s = [], [], [], []
for type_name, true_entities in d1.items():
pred_entities = d2[type_name]
nb_correct = len(true_entities & pred_entities)
nb_pred = len(pred_entities)
nb_true = len(true_entities)
p = 100 * nb_correct / nb_pred if nb_pred > 0 else 0
r = 100 * nb_correct / nb_true if nb_true > 0 else 0
f1 = 2 * p * r / (p + r) if p + r > 0 else 0
report += row_fmt.format(*[type_name, p, r, f1, nb_true], width=width, digits=digits)
ps.append(p)
rs.append(r)
f1s.append(f1)
s.append(nb_true)
report += u'\n'
# compute averages
report += row_fmt.format(last_line_heading,
np.average(ps, weights=s),
np.average(rs, weights=s),
np.average(f1s, weights=s),
np.sum(s),
width=width, digits=digits)
return report | 6158c82879b2894c96479bb96f986e348ef02b00 | 16,852 |
from dask.dataframe import DataFrame  # assumption: the categorize() call suggests a dask DataFrame
def tidy_conifer(ddf: DataFrame) -> DataFrame:
"""Tidy up the raw conifer output."""
result = ddf.drop(columns=["marker", "identifier", "read_lengths", "kraken"])
result[["name", "taxonomy_id"]] = result["taxa"].str.extract(
r"^(?P<name>[\w ]+) \(taxid (?P<taxonomy_id>\d+)\)$", expand=True
)
return result.drop(columns=["taxa"]).categorize(
columns=["name", "taxonomy_id"], index=False
) | 88e55855d5f9ca8859a0e058a593aadd44774387 | 16,853 |
import collections
def get_duplicates(lst):
"""Return a list of the duplicate items in the input list."""
return [item for item, count in collections.Counter(lst).items() if count > 1] | 8f10226c904f95efbee447b4da5dc5764b18f6d2 | 16,855 |
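# Usage sketch:
print(get_duplicates([1, 2, 2, 3, 3, 3]))  # [2, 3]
print(get_duplicates(["a", "b", "c"]))     # []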
def relu(x, alpha=0):
"""
Rectified Linear Unit.
If alpha is between 0 and 1, the function performs leaky relu.
alpha values are commonly between 0.1 and 0.3 for leaky relu.
Parameters
----------
x : numpy array
Values to be activated.
alpha : float, optional
The scale factor for the linear unit.
Typical values are between 0.1 and 0.3.
The default is 0.1.
Returns
-------
z : numpy array
The activated values.
"""
z = x.copy()
z[x < 0] = z[x < 0]*alpha
return z | f18b331ef66d14a29e1ad5f14b610af583ea7b3a | 16,856 |
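# Usage sketch: alpha=0 gives standard ReLU, a small alpha gives leaky ReLU.
import numpy as np

x = np.array([-2.0, -0.5, 0.0, 1.5])
print(relu(x))             # negatives zeroed, positives unchanged
print(relu(x, alpha=0.1))  # [-0.2, -0.05, 0., 1.5]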
def build_unique_dict(controls):
"""Build the disambiguated list of controls
Separated out to a different function so that we can get
the control identifiers for printing.
"""
name_control_map = UniqueDict()
# collect all the possible names for all controls
# and build a list of them
for ctrl in controls:
ctrl_names = get_control_names(ctrl, controls)
# for each of the names
for name in ctrl_names:
name_control_map[name] = ctrl
return name_control_map | 931b90a34e151550c399b314d368a54e3c816796 | 16,857 |
def serialize_thrift_object(thrift_obj, proto_factory=Consts.PROTO_FACTORY):
"""Serialize thrift data to binary blob
:param thrift_obj: the thrift object
:param proto_factory: protocol factory, set default as Compact Protocol
:return: string the serialized thrift payload
"""
return Serializer.serialize(proto_factory(), thrift_obj) | f6845b7539da82dc0555e11b0013db034d297e70 | 16,858 |
import numpy as np
def _add_noise(audio, snr):
"""
    Add Gaussian noise to a signal with the given SNR.
    :param audio(np.array):
    :param snr(float): signal-to-noise ratio (dB)
:return: audio with added noise
"""
audio_mean = np.mean(audio**2)
audio_mean_db = 10 * np.log10(audio_mean)
noise_mean_db = snr - audio_mean_db
noise_mean = 10 ** (noise_mean_db/10)
return audio + np.random.normal(0, np.sqrt(noise_mean), len(audio)) | 4f77e7a2893dc0bdcaf5e170c5e17371127b80d5 | 16,861 |
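# Usage sketch: add Gaussian noise to a short synthetic tone. The snr value is in dB and follows
# the convention of the formula above (noise level referenced to the signal level), which may
# differ from other libraries.
import numpy as np

audio = np.sin(2 * np.pi * 440 * np.arange(0, 0.01, 1 / 16000))
noisy = _add_noise(audio, snr=-20.0)
print(audio.shape == noisy.shape)  # True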