content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
import string
def generate_create_account_key():
"""
Generates a random account creation key. Implementation is very similar to
generate_reset_key().
"""
chars = string.ascii_lowercase + string.digits
return misc_utils.generate_random_string(constants.CREATE_ACCOUNT_KEY_LENGTH,
chars=chars) | e52405a325b787b9473da5530c909bfdcff0d9b4 | 14,800 |
import re
def parse_dblife(file):
"""Parse an DBLife file, returning a tuple:
positions: list of (x,y) co-ordinates
comments: all comments in file, as a list of strings, one per line.
"""
lines = file.split("\n")
comments = []
positions = []
x = 0
y = 0
dblife_pattern = r"((\d*)(\.|O|o|\*))*"
for line in lines:
        line = line.strip()
if line.startswith("!"):
comments.append(line[2:])
# check if this is part of the pattern
if re.match(dblife_pattern, line):
count = 0
for char in line:
# repeat counts
if char.isdigit():
count *= 10
count += int(char)
# blanks
if char in ".":
if count != 0:
x += int(count)
else:
x += 1
count = 0
# ons
if char in "oO*":
if count != 0:
for i in range(count):
positions.append((x, y))
x += 1
else:
positions.append((x, y))
x += 1
count = 0
count = 0
# newlines
y += 1
x = 0
count = 0
return positions, comments | b2d54240280b657c82d8a70da9e9f0ce47a92c7a | 14,801 |
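A short usage sketch, assuming the parse_dblife function above is in scope; the pattern string is a made-up DBLife-style fragment.
sample = "! glider\n.O.\n..O\nOOO"
positions, comments = parse_dblife(sample)
print(comments)    # comment text captured from the "!" line
print(positions)   # list of (x, y) coordinates of live cells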
from logging import Logger
from typing import Any
from typing import Callable
from sqlalchemy.exc import DBAPIError, OperationalError
from sqlalchemy.orm.exc import NoResultFound
def db_handle_error(logger: Logger, default_return_val: Any) \
-> Any:
"""Handle operational database errors via decorator."""
def decorator(func: Callable) -> Any:
def wrapper(*args, **kwargs): # type: ignore
# Bypass attempt to perform query and just return default value
is_db_disabled: bool = app_config.get(
'BROWSE_DISABLE_DATABASE') or False
if is_db_disabled:
if logger:
logger.info(
'Database is disabled per BROWSE_DISABLE_DATABASE')
return default_return_val
try:
return func(*args, **kwargs)
except NoResultFound:
return default_return_val
except (OperationalError, DBAPIError) as ex:
if logger:
logger.warning(
f'Error executing query in {func.__name__}: {ex}')
return default_return_val
except Exception as ex:
if logger:
logger.warning(
f'Unknown exception in {func.__name__}: {ex}')
raise
return wrapper
return decorator | 1a807bc7a49abc9b50970145c520e823103f3607 | 14,802 |
from typing import Iterable
from typing import Optional
from typing import List
from pathlib import Path
def climb_directory_tree(starting_path: PathOrStr, file_patterns: Iterable[str]) -> Optional[List[Path]]:
"""Climb the directory tree looking for file patterns."""
current_dir: Path = Path(starting_path).absolute()
if current_dir.is_file():
current_dir = current_dir.parent
while current_dir.root != str(current_dir):
for root_file in file_patterns:
found_files = list(current_dir.glob(root_file))
if found_files:
return found_files
current_dir = current_dir.parent
return None | 80f036da4cf5564a2b96359ea67db19602333420 | 14,803 |
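A usage sketch, assuming climb_directory_tree above is importable; it walks upward from the current file looking for common project-root markers (the marker names here are just examples).
from pathlib import Path

markers = ("pyproject.toml", "setup.py", ".git")
found = climb_directory_tree(Path(__file__), markers)
print(found or "no project-root marker found")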
def serve_file(request, token, require_requester=True, verify_requester=True, signer=None):
"""Basic view to serve a file.
Uses ``evaluate_request`` under the hood. Please refer to that function to view information about exceptions.
:param request: the file request
:type request: bgfiles.models.FileRequest
:param token: the token
:type token: str
:param require_requester: whether we expect the token to contain the request
:type require_requester: bool
:param verify_requester: whether we need to verify the current user is the requester
:type verify_requester: bool
:param signer: signer to use
:return: django.http.HTTPResponse
"""
file_request, data = evaluate_request(request, token, require_requester=require_requester,
verify_requester=verify_requester, signer=signer)
return toolbox.serve(file_request) | 98bfae971e141130e94932afb8fdee2a285f2a5a | 14,804 |
def d2_rho_heterodyne(t, rho_vec, A, args):
"""
Need to cythonize, docstrings
"""
M = A[0] + A[3]
e1 = cy_expect_rho_vec(M, rho_vec, 0)
d1 = spmv(M, rho_vec) - e1 * rho_vec
M = A[0] - A[3]
e1 = cy_expect_rho_vec(M, rho_vec, 0)
d2 = spmv(M, rho_vec) - e1 * rho_vec
return [1.0 / np.sqrt(2) * d1, -1.0j / np.sqrt(2) * d2] | 6628c1a7299ee7842a839fd63b00857808bcd3ec | 14,805 |
from pathlib import Path
def get_venv():
"""Return virtual environment path or throw an error if not found"""
env = environ.get("VIRTUAL_ENV", None)
if env:
return Path(env)
else:
raise EnvironmentError("No virtual environment found.") | 44dd4660198a8f5538cbe91ffe52468adc8ee0e8 | 14,806 |
from typing import List
from typing import Dict
from typing import Tuple
from typing import Union
from pathlib import Path
import os
import xml.etree.ElementTree as ET
import numpy as np
def parse_pascal_voc_anno(
anno_path: str, labels: List[str] = None, keypoint_meta: Dict = None
) -> Tuple[List[AnnotationBbox], Union[str, Path], np.ndarray]:
""" Extract the annotations and image path from labelling in Pascal VOC format.
Args:
anno_path: the path to the annotation xml file
labels: list of all possible labels, used to compute label index for each label name
keypoint_meta: meta data of keypoints which should include at least
"labels".
Return
A tuple of annotations, the image path and keypoints. Keypoints is a
numpy array of shape (N, K, 3), where N is the number of objects of the
category that defined the keypoints, and K is the number of keypoints
defined in the category. `len(keypoints)` would be 0 if no keypoints
found.
"""
anno_bboxes = []
keypoints = []
tree = ET.parse(anno_path)
root = tree.getroot()
# get image path from annotation. Note that the path field might not be set.
anno_dir = os.path.dirname(anno_path)
if root.find("path") is not None:
im_path = os.path.realpath(
os.path.join(anno_dir, root.find("path").text)
)
else:
im_path = os.path.realpath(
os.path.join(anno_dir, root.find("filename").text)
)
# extract bounding boxes, classification and keypoints
objs = root.findall("object")
for obj in objs:
label = obj.find("name").text
# Get keypoints if any.
# For keypoint detection, currently only one category (except
# background) is allowed. We assume all annotated objects are of that
# category.
if keypoint_meta is not None:
kps = []
kps_labels = keypoint_meta["labels"]
# Assume keypoints are available
kps_annos = obj.find("keypoints")
if kps_annos is None:
raise Exception(f"No keypoints found in {anno_path}")
assert set([kp.tag for kp in kps_annos]).issubset(
kps_labels
), "Incompatible keypoint labels"
# Read keypoint coordinates: [x, y, visibility]
# Visibility 0 means invisible, non-zero means visible
for name in kps_labels:
kp_anno = kps_annos.find(name)
if kp_anno is None:
# return 0 for invisible keypoints
kps.append([0, 0, 0])
else:
kps.append(
[
int(float(kp_anno.find("x").text)),
int(float(kp_anno.find("y").text)),
1,
]
)
keypoints.append(kps)
# get bounding box
bnd_box = obj.find("bndbox")
left = int(bnd_box.find("xmin").text)
top = int(bnd_box.find("ymin").text)
right = int(bnd_box.find("xmax").text)
bottom = int(bnd_box.find("ymax").text)
# Set mapping of label name to label index
if labels is None:
label_idx = None
else:
label_idx = labels.index(label)
anno_bbox = AnnotationBbox.from_array(
[left, top, right, bottom],
label_name=label,
label_idx=label_idx,
im_path=im_path,
)
assert anno_bbox.is_valid()
anno_bboxes.append(anno_bbox)
return anno_bboxes, im_path, np.array(keypoints) | 5bb8131f359fc3a50db03dd4dad9845e81b3c25c | 14,807 |
def load_user(user_id):
"""Login manager load user method."""
return User.query.get(int(user_id)) | 40d5f35aa88163a6ab69c1da7bad6634225f2cf3 | 14,808 |
def test_interpolate_energy_dispersion():
"""Test of interpolation of energy dispersion matrix using a simple dummy model."""
x = [0.9, 1.1]
y = [8., 11.5]
n_grid = len(x) * len(y)
n_offset = 1
n_en = 30
n_mig = 20
clip_level = 1.e-3
# define simple dummy bias and resolution model using two parameters x and y
def get_bias_std(i_en, x, y):
i_en = i_en + 3 * ((x - 1) + (y - 10.))
de = n_en - i_en
de[de < 0] = 0.
bias = de**0.5 + n_mig / 2
rms = 5 - 2 * (i_en / n_en)
bias[i_en < 3] = 2 * n_mig # return high values to zero out part of the table
rms[i_en < 3] = 0
return bias, rms
en = np.arange(n_en)[:, np.newaxis]
mig = np.arange(n_mig)[np.newaxis, :]
# auxiliary function to compute profile of the 2D distribution
# used to check if the expected and interpolated matrixes are similar
def calc_mean_std(matrix):
n_en = matrix.shape[0]
means = np.empty(n_en)
stds = np.empty(n_en)
for i_en in np.arange(n_en):
w = matrix[i_en, :]
if np.sum(w) > 0:
means[i_en] = np.average(mig[0, :], weights=w)
stds[i_en] = np.sqrt(np.cov(mig[0, :], aweights=w))
else: # we need to skip the empty columns
means[i_en] = -1
stds[i_en] = -1
return means, stds
# generate true values
interp_pars = (1, 10)
bias, sigma = get_bias_std(en, *interp_pars)
mig_true = np.exp(-(mig - bias)**2 / (2 * sigma**2))
mig_true[mig_true < clip_level] = 0
# generate a grid of migration matrixes
i_grid = 0
pars_all = np.empty((n_grid, 2))
mig_all = np.empty((n_grid, n_en, n_mig, n_offset))
for xx in x:
for yy in y:
bias, sigma = get_bias_std(en, xx, yy)
mig_all[i_grid, :, :, 0] = (np.exp(-(mig - bias)**2 / (2 * sigma**2)))
pars_all[i_grid, :] = (xx, yy)
i_grid += 1
# do the interpolation and compare the results with expected ones
mig_interp = interp.interpolate_energy_dispersion(mig_all, pars_all, interp_pars, method='linear')
# check if all the energy bins have normalization 1 or 0 (can happen because of empty bins)
sums = np.sum(mig_interp[:, :, 0], axis=1)
assert np.logical_or(np.isclose(sums, 0., atol=1.e-5), np.isclose(sums, 1., atol=1.e-5)).min()
# now check if we reconstruct the mean and sigma roughly fine after interpolation
bias0, stds0 = calc_mean_std(mig_true) # true
bias, stds = calc_mean_std(mig_interp[:, :, 0]) # interpolated
# first remove the bins that are empty in true value
idxs = bias0 > 0
bias0 = bias0[idxs]
bias = bias[idxs]
stds0 = stds0[idxs]
stds = stds[idxs]
# allowing for a 0.6 bin size error on the interpolated values
assert np.allclose(bias, bias0, atol=0.6, rtol=0.)
assert np.allclose(stds, stds0, atol=0.6, rtol=0.) | 73c60f2b01d20b6e399dfb15da2c3c4b8622a90c | 14,809 |
def _transpose_list_array(x):
"""Transposes a list matrix
"""
n_dims = len(x)
assert n_dims > 0
n_samples = len(x[0])
rows = [None] * n_samples
for i in range(n_samples):
r = [None] * n_dims
for j in range(n_dims):
r[j] = x[j][i]
rows[i] = r
return rows | 8815526c6485475aeaf791c2b1350449730b94f6 | 14,810 |
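For comparison, the same transposition can be done with zip; this sketch assumes _transpose_list_array above is in scope.
x = [[1, 2, 3], [4, 5, 6]]           # 2 "dimensions", 3 samples each
rows = [list(r) for r in zip(*x)]    # [[1, 4], [2, 5], [3, 6]]
assert rows == _transpose_list_array(x)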
def load_businessgroup(request):
""" Business Group Dependent/Chained Dropdown List """
business_type_id = request.GET.get('business_type')
business_group_list = BusinessGroup.objects.filter(
business_type_id=business_type_id).order_by('name')
context = {'business_group_list': business_group_list}
return render(request, 'app_sme12/form_partial/bus_group_dropdown_list_options.html', context) | 8801fbd6ae99ed939721d94bd7f3539b5b050d0a | 14,811 |
def seed_normalization(train_X, train_Y, test_X, testY, nor_method=0, merge=0, column=0):
"""
    0 for MinMaxScaler, 1 for StandardScaler, 2 for Normalizer, 3 for MinMax followed
    by StandardScaler; anything else skips scaling.
    :param nor_method: which normalization method to use (see above)
    :param merge: whether to normalize the training and test sets together
        (0 = fit on stacked train+test, 1 = fit on train only, otherwise fit each set separately)
    :return: normalized train_X, train_Y, test_X, testY
"""
# imp_mean = SimpleImputer(missing_values=np.nan, strategy="mean")
imp_mean = KNNImputer(n_neighbors=10,weights="uniform")
train_X = imp_mean.fit_transform(train_X)
test_X = imp_mean.fit_transform(test_X)
if column == 0:
if nor_method == 0:
scaler = MinMaxScaler()
elif nor_method == 1:
scaler = StandardScaler()
elif nor_method == 2:
scaler = Normalizer()
elif nor_method == 3:
scaler = Pipeline([('min_max', MinMaxScaler()),
('standard', StandardScaler())])
else:
return train_X, train_Y, test_X, testY
if merge == 0:
scaler.fit(np.vstack((train_X, test_X)))
train_X = scaler.transform(train_X)
test_X = scaler.transform(test_X)
elif merge == 1:
scaler.fit(train_X)
train_X = scaler.transform(train_X)
test_X = scaler.transform(test_X)
else:
train_X = scaler.fit_transform(train_X)
test_X = scaler.fit_transform(test_X)
#scaler.fit(np.vstack((train_X, test_X)))
return train_X, train_Y, test_X, testY
else:
train_X = train_X.T
x_mean = np.mean(train_X, axis=0)
x_std = np.std(train_X, axis=0)
        train_X = (train_X - x_mean) / x_std
test_X = test_X.T
x_mean = np.mean(test_X, axis=0)
x_std = np.std(test_X, axis=0)
        test_X = (test_X - x_mean) / x_std
return train_X.T, train_Y, test_X.T, testY | bae1181b6cca53444f09a69abaa3958e8500f71c | 14,812 |
import pathlib
def combine_matrix_runs(path, runs, pacc_file):
"""Combine a set of transition matrix files.
Args:
path: The base path containing the data to combine.
runs: The list of runs to combine.
pacc_file: The name of the file to combine.
Returns:
A TransitionMatrix object with the combined data.
"""
true_path = pathlib.Path(path)
return combine_matrices([read_matrix(true_path / run / pacc_file)
for run in runs]) | dee47a3f4bfb6229a5c6aec531a0b50df5275a0b | 14,813 |
import json
def get_pkg_descr(package, version=None, last_modified=None):
"""
Get package description from registry
"""
json_data = fetch_page('http://registry.npmjs.org/%s' % package, last_modified=last_modified)
if json_data is None: # NB: empty string is not None but will fail the check
return None
else:
return json.loads(json_data) | 9be485ee3e63f25da995b6d454a8dd15de4b7a66 | 14,814 |
def has_pattern(str_or_strlist):
"""When passed a string, equivalent to calling looks_like_pattern.
When passed a string list, returns True if any one of the strings looks like a pattern,
False otherwise."""
strlist = [str_or_strlist] if isinstance(str_or_strlist, str) else str_or_strlist
return len([s for s in strlist if looks_like_pattern(s)]) > 0 | 902069f01a59b5e42c25635271dc27375732437b | 14,815 |
def update_hidden_area(*args):
"""update_hidden_area(hidden_area_t ha) -> bool"""
return _idaapi.update_hidden_area(*args) | 19739a98283203ece9f29d4fe073633318c0c2a4 | 14,816 |
def after_update_forecast_datasets(msg, config, checklist):
"""Calculate the list of workers to launch after the
update_forecast_datasets worker ends.
:arg msg: Nowcast system message.
:type msg: :py:class:`nemo_nowcast.message.Message`
:arg config: :py:class:`dict`-like object that holds the nowcast system
configuration that is loaded from the system configuration
file.
:type config: :py:class:`nemo_nowcast.config.Config`
:arg dict checklist: System checklist: data structure containing the
present state of the nowcast system.
:returns: Worker(s) to launch next
:rtype: list
"""
next_workers = {
"crash": [],
"failure fvcom forecast": [],
"failure nemo forecast": [],
"failure nemo forecast2": [],
"failure wwatch3 forecast": [],
"failure wwatch3 forecast2": [],
"success fvcom forecast": [],
"success nemo forecast": [],
"success nemo forecast2": [],
"success wwatch3 forecast": [],
"success wwatch3 forecast2": [],
}
if msg.type.startswith("success"):
model = msg.type.split()[1]
run_type = msg.type.split()[2]
try:
run_date = checklist[f"{model.upper()} run"][run_type]["run date"]
except KeyError:
# FVCOM run has model config prefixed to run type
run_date = checklist[f"{model.upper()} run"][f"x2 {run_type}"]["run date"]
next_workers[msg.type].append(
NextWorker("nowcast.workers.ping_erddap", args=[f"{model}-forecast"])
)
if model == "nemo":
next_workers[msg.type].extend(
[
NextWorker(
"nowcast.workers.make_plots",
args=["nemo", run_type, "publish", "--run-date", run_date],
),
NextWorker(
"nowcast.workers.make_surface_current_tiles",
args=[run_type, "--run-date", run_date],
),
]
)
return next_workers[msg.type] | 3ef9a6d37f871900e96f6227fea2f7678843acca | 14,817 |
def index(request):
"""Homepage for this app.
"""
with open('index.html') as fp:
return HttpResponse(fp.read()) | b9ce38f59443e38e5d27ff7f153a834e1c11b429 | 14,818 |
def SECH(*args) -> Function:
"""
The SECH function returns the hyperbolic secant of an angle.
    Learn more: https://support.google.com/docs/answer/9116560
"""
return Function("SECH", args) | 594921375aaa7d4fb409e1a4792a6752f81b6bb2 | 14,819 |
def read_ATAC_10x(matrix, cell_names='', var_names='', path_file=''):
"""
Load sparse matrix (including matrices corresponding to 10x data) as AnnData objects.
    Reads the mtx file, the tsv file corresponding to cell_names, and the bed file containing the variable names.
Parameters
----------
matrix: sparse count matrix
cell_names: optional, tsv file containing cell names
var_names: optional, bed file containing the feature names
Return
------
AnnData object
"""
mat = mmread(''.join([path_file, matrix]))
mat = mat.toarray()
mat = np.matrix(mat.transpose())
with open(path_file+cell_names) as f:
barcodes = f.readlines()
barcodes = [x[:-1] for x in barcodes]
with open(path_file+var_names) as f:
var_names = f.readlines()
var_names = ["_".join(x[:-1].split('\t')) for x in var_names]
adata = ad.AnnData(mat, obs=pd.DataFrame(index=barcodes), var=pd.DataFrame(index=var_names))
adata.uns['omic'] = 'ATAC'
return(adata) | 9f2073d7582f93db2f714f401fc0fb5e0762a2fc | 14,820 |
def get_html_subsection(name):
"""
Return a subsection as HTML, with the given name
:param name: subsection name
:type name: str
:rtype: str
"""
return "<h2>{}</h2>".format(name) | 2e0f37a7bb9815eda24eba210d8518e64595b9b7 | 14,821 |
import numpy as np
def compute_norms(items):
"""
Compute the norms of the item vectors provided.
Arguments:
items -- a hashmap which maps itemIDs to the characteristic vectors
"""
norms = {}
for item in items:
norms[item] = np.sqrt(np.sum(np.square(items[item])))
return norms | ff0a805b6a143b7b52c653226b69aed8319eb5ce | 14,822 |
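A minimal usage sketch, assuming compute_norms and numpy are in scope.
import numpy as np

items = {"a": np.array([3.0, 4.0]), "b": np.array([1.0, 0.0])}
norms = compute_norms(items)
print(norms["a"], norms["b"])    # 5.0 1.0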
def do_part_1():
"""
Solves part 1
"""
digested_lines = list(map(digest_line, input_lines(2)))
# Poor man's partial
doubles = sum(map(lambda l: contains_nple(l, reps=2), digested_lines))
triples = sum(map(lambda l: contains_nple(l, reps=3), digested_lines))
print(doubles * triples)
return doubles * triples | 75fa72804d8721b4332d74f00c0bea4d82bcdd02 | 14,823 |
import torch
def create_Rz_batch(a):
"""
Creates a batch of rotation matrices about z of angles a.
Input (batch)
Output (batch, 3, 3)
"""
return torch.stack([
torch.stack([torch.cos(a),
torch.sin(a),
torch.zeros_like(a)],
dim=1),
torch.stack([-torch.sin(a),
torch.cos(a),
torch.zeros_like(a)],
dim=1),
torch.stack([torch.zeros_like(a),
torch.zeros_like(a),
torch.ones_like(a)],
dim=1)
], dim=2) | 7abed1ef608c9985605096679d28c86f5fabab8e | 14,824 |
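A usage sketch, assuming create_Rz_batch above is in scope: a batch of two angles gives a (2, 3, 3) stack of rotation matrices, and the 90-degree matrix sends the x axis to the y axis.
import math
import torch

angles = torch.tensor([0.0, math.pi / 2])
R = create_Rz_batch(angles)
print(R.shape)                                  # torch.Size([2, 3, 3])
print(R[1] @ torch.tensor([1.0, 0.0, 0.0]))     # ~[0, 1, 0]: x rotated 90 degrees about z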
import numpy as np
import torch
def get_upsample_filter(size):
"""Make a 2D bilinear kernel suitable for upsampling"""
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
filter = (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
return torch.from_numpy(filter).float() | 8c286e6c20f3400c5206f3f15514a65dc8f3b0b5 | 14,825 |
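A sketch of how such a kernel is commonly used, assuming get_upsample_filter above is in scope; copying it into a ConvTranspose2d weight gives approximate bilinear 2x upsampling (the layer sizes here are illustrative only).
import torch
import torch.nn as nn

# Initialize a 2x upsampling layer with the bilinear kernel.
up = nn.ConvTranspose2d(1, 1, kernel_size=4, stride=2, padding=1, bias=False)
kernel = get_upsample_filter(4)                 # (4, 4) bilinear kernel
with torch.no_grad():
    up.weight.copy_(kernel.expand(1, 1, 4, 4))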
import math
def lst2gmst(longitude,
hour,
minute=None,
second=None,
longitudeDirection='W',
longitudeUnits='DEGREES'):
"""
Converts Local Sidereal Time to Greenwich Mean Sidereal Time.
Parameters
----------
longitude : float (any numeric type)
The longitude of the site to calculate the Local Sidereal Time. Defaults are
Longitude WEST and units DEGREES, but these can be changed with the optional
parameters lonDirection and lonUnits.
hour : int (or float)
If an integer, the function will expect a minute and second. If a float, it
will ignore minute and second and convert from decimal hours to hh:mm:ss.
minute : int
Ignored if hour is a float.
second : int (any numeric type, to include microseconds)
Ignored if hour is a float.
longitudeDirection : string
Default is longitude WEST, 'W', but you can specify EAST by passing 'E'.
longitudeUnits : string
Default units are 'DEGREES', but this can be switched to radians by passing
'RADIANS' in this parameter.
Returns
-------
hour : int
The hour of the calculated GMST
minute : int
The minutes of the calculated GMST
second: float
The seconds of the calculated GMST
Examples
--------
>>> lst2gmst(70.3425, hour=14, minute=26, second=18)
(19, 7, 40.20000000000607)
>>> lst2gmst(5.055477, hour=14.4383333333333333, longitudeDirection='E', longitudeUnits='RADIANS')
(19, 7, 40.20107388985991)
"""
if minute != None and second != None:
hours = sex2dec(hour, minute, second)
elif minute == None and second == None:
hours = hour
else:
raise AssertionError('minute and second must either be both set, or both unset.')
if longitudeUnits.upper() == 'DEGREES':
longitudeTime = longitude / 15.0
elif longitudeUnits.upper() == 'RADIANS':
longitudeTime = longitude * 180.0 / math.pi / 15.0
if longitudeDirection.upper() == 'W':
gmst = hours + longitudeTime
elif longitudeDirection.upper() == 'E':
gmst = hours - longitudeTime
else:
raise AssertionError('longitudeDirection must be W or E')
gmst = gmst % 24.0
return dec2sex(gmst) | 4e651dde2b5dadb1af5d00bc1813272190f07cdf | 14,826 |
import ast
def filter_funcs(node) -> bool:
"""Filter to get functions names and remove dunder names"""
if not isinstance(node, ast.FunctionDef):
return False
elif node.name.startswith('__') or node.name.endswith('__'):
return False
else:
return True | 022181afa887965af0f2d4c5ec33de07b8a3c089 | 14,827 |
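A usage sketch, assuming filter_funcs above is in scope: collect non-dunder function names from a source string.
import ast

src = "def __init__(self): pass\ndef parse(data): pass\ndef _helper(): pass"
names = [node.name for node in ast.walk(ast.parse(src)) if filter_funcs(node)]
print(names)    # ['parse', '_helper']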
from ptvsd.server import api
def attach(address, log_dir=None, multiprocess=True):
"""Starts a DAP (Debug Adapter Protocol) server in this process,
and connects it to the IDE that is listening for an incoming
connection on a socket with the specified address.
address must be a (host, port) tuple, as defined by the standard
socket module for the AF_INET address family.
If specified, log_dir must be a path to some existing directory;
the debugger will then create its log files in that directory.
A separate log file is created for every process, to accommodate
scenarios involving multiple processes. The log file for a process
with process ID <pid> will be named "ptvsd_<pid>.log".
If multiprocess is true, ptvsd will also intercept child processes
spawned by this process, inject a debug server into them, and
configure it to attach to the same IDE before the child process
starts running any user code.
This function doesn't return until connection to the IDE has been
established.
"""
return api.attach(address, log_dir) | aa36e20df5b7d4ad5eb769987ab6e36d368afbb7 | 14,828 |
from typing import Optional
def create_api_token(
creator_id: UserID,
permissions: set[PermissionID],
*,
description: Optional[str] = None,
) -> ApiToken:
"""Create an API token."""
num_bytes = 40
token = token_urlsafe(num_bytes)
db_api_token = DbApiToken(
creator_id, token, permissions, description=description
)
db.session.add(db_api_token)
db.session.commit()
return _db_entity_to_api_token(db_api_token) | 044d041bd013cb5b0ebc9c534b8c0162c3996172 | 14,829 |
import argparse
def parse_arguments():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description="Update OCFL inventory sidecar file",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("path", type=str, nargs="*",
help="OCFL inventory files or directories containing them")
parser.add_argument("--digest", default=None,
help="Digest algorithm to use overriding any in inventory")
ocfl.add_shared_args(parser)
args = parser.parse_args()
ocfl.check_shared_args(args)
return args | 4e280ba0471a9bf215871d00e77ae2c4b9f41ebb | 14,830 |
from typing import Tuple
def match_image_widths(
image_i1: Image, image_i2: Image
) -> Tuple[Image, Image, Tuple[float, float], Tuple[float, float]]:
"""Automatically chooses the target width (larger of the two inputs), and
scales both images to that width.
Args:
image_i1: 1st image to match width.
image_i2: 2nd image to match width.
Returns:
Scaled image_i1.
Scaled image_i2.
Scaling factor (W, H) for image_i1.
Scaling factor (W, H) for image_i2.
"""
max_width = max(image_i1.width, image_i2.width)
# scale image_i1
new_width = int(max_width)
new_height = int(image_i1.height * new_width / image_i1.width)
scale_factor_i1 = (new_width / image_i1.width, new_height / image_i1.height)
scaled_image_i1 = resize_image(image_i1, new_height, new_width)
# scale image_i2
new_width = int(max_width)
new_height = int(image_i2.height * new_width / image_i2.width)
scale_factor_i2 = (new_width / image_i2.width, new_height / image_i2.height)
scaled_image_i2 = resize_image(image_i2, new_height, new_width)
return scaled_image_i1, scaled_image_i2, scale_factor_i1, scale_factor_i2 | 86f043a3069202b2dfc3eb24f9ac10e9077b2237 | 14,831 |
from typing import Optional
from typing import Union
from typing import Any
from typing import Dict
import copy
def get_parameter_value_and_validate_return_type(
domain: Optional[Domain] = None,
parameter_reference: Optional[Union[Any, str]] = None,
expected_return_type: Optional[Union[type, tuple]] = None,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
) -> Optional[Any]:
"""
This method allows for the parameter_reference to be specified as an object (literal, dict, any typed object, etc.)
or as a fully-qualified parameter name. In either case, it can optionally validate the type of the return value.
"""
if isinstance(parameter_reference, dict):
parameter_reference = dict(copy.deepcopy(parameter_reference))
parameter_reference = get_parameter_value(
domain=domain,
parameter_reference=parameter_reference,
variables=variables,
parameters=parameters,
)
if expected_return_type is not None:
if not isinstance(parameter_reference, expected_return_type):
raise ge_exceptions.ProfilerExecutionError(
message=f"""Argument "{parameter_reference}" must be of type "{str(expected_return_type)}" \
(value of type "{str(type(parameter_reference))}" was encountered).
"""
)
return parameter_reference | 9cdd3106a0397a63a13d71b1c0ce5815a41e47ed | 14,832 |
def diff_tags(list_a, list_b):
"""
Return human readable diff string of tags changed between two tag lists
:param list_a: Original tag list
:param list_b: New tag list
:return: Difference string
"""
status_str = text_type("")
tags_added = [tag for tag in list_b if tag not in list_a]
tags_removed = [tag for tag in list_a if tag not in list_b]
if tags_added and tags_removed:
status_str += "added: {0}".format(text_type(tags_added))
status_str += " removed: {0}".format(text_type(tags_removed))
elif tags_added:
status_str += "added: {0}".format(text_type(tags_added))
elif tags_removed:
status_str += "removed: {0}".format(text_type(tags_removed))
if not status_str:
status_str = "no changes required."
return status_str | e9f69bcdee0e2cb6fd260c56f8bbfe5f568afc63 | 14,833 |
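A usage sketch, assuming diff_tags above (and its text_type helper, e.g. from six) is in scope.
print(diff_tags(["web", "prod"], ["web", "staging"]))
# added: ['staging'] removed: ['prod']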
import numpy
def distance_on_great_circle(start_point, direction, distance):
"""compute the location of a point a specified distance along a great circle
NOTE: This assumes a spherical earth. The error introduced in the location
    is pretty small (~15 km for a 13000 km path), but it totally screws with
    the altitude. YOU SHOULD NOT USE THE ALTITUDE COMING OUT OF THIS, ESPECIALLY
    IF YOU HAVE ANY MEANINGFUL DISTANCE
Arguments:
start_point: the starting point of the great circle. The direction is
given in a NED frame at this point. Numpy (3,) array in radians, lla
direction: a NED vector indicating the direction of the great circle
distance: the length of the great circle arc (m)
Returns:
end_point: the end of a great circle path of length <distance> from
<start_point> with initial <direction>
"""
start_xyz = geodesy.conversions.lla_to_xyz(start_point)
direction = geometry.conversions.to_unit_vector(direction)
delta_xyz = geodesy.conversions.ned_to_xyz(
direction, numpy.array(start_point, ndmin=2))
rotation_axis = -geometry.conversions.to_unit_vector(
numpy.cross(start_xyz, delta_xyz))
rotation_magnitude = distance / environments.earth.constants['r0']
rotation_quaternion = geometry.quaternion.Quaternion()
rotation_quaternion.from_axis_and_rotation(
rotation_axis, rotation_magnitude)
end_point_xyz = rotation_quaternion.rot(start_xyz)
end_point = geodesy.conversions.xyz_to_lla(end_point_xyz)
return end_point | e39c62435c208cb2ea4e951b91b641cfbfcd45a8 | 14,834 |
def construct_tree_framework(bracket):
"""Given the tree in bracket form, creates a tree with labeled leaves
and unlabeled inner nodes."""
if type(bracket)==int: #base case, creates leaf
        return Node(bracket)
else: #recursive step, inner nodes
root = Node(None, construct_tree_framework(bracket[0]), construct_tree_framework(bracket[1]))
return root | a54651fcc5604f46985b11d0d783c76f4368a9d0 | 14,835 |
import numpy as np
def eckart_transform(atommasses, atomcoords):
"""Compute the Eckart transform.
This transform is described in https://gaussian.com/vib/.
Parameters
----------
atommasses : array-like
Atomic masses in atomic mass units (amu).
atomcoords : array-like
Atomic coordinates.
Returns
-------
array-like
Examples
--------
>>> from overreact import _datasets as datasets
>>> data = datasets.logfiles["tanaka1996"]["Cl·@UMP2/cc-pVTZ"]
>>> eckart_transform(data.atommasses, data.atomcoords)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
>>> data = datasets.logfiles["symmetries"]["dihydrogen"]
>>> eckart_transform(data.atommasses, data.atomcoords)
array([[...]])
>>> data = datasets.logfiles["symmetries"]["water"]
>>> eckart_transform(data.atommasses, data.atomcoords)
array([[-9.42386999e-01, 0.00000000e+00, 0.00000000e+00,
2.99716727e-01, -2.86166258e-06, -7.42376895e-02,
-1.19022276e-02, 4.33736541e-03, -1.28081683e-01],
[-0.00000000e+00, -9.42386999e-01, 0.00000000e+00,
1.40934586e-02, -1.34562803e-07, 1.01850683e-01,
-1.52466204e-01, -2.78628770e-01, -2.13218735e-02],
[-0.00000000e+00, -0.00000000e+00, -9.42386999e-01,
-1.47912143e-01, 1.41224899e-06, -1.40724409e-01,
-3.86450545e-02, -1.77596105e-02, -2.61565554e-01],
[-2.36544652e-01, -0.00000000e+00, -0.00000000e+00,
-5.97037403e-01, -6.33525274e-01, 2.70812665e-02,
-2.34354970e-01, 8.09905642e-02, 3.52169811e-01],
[-0.00000000e+00, -2.36544652e-01, -0.00000000e+00,
-2.80742485e-02, -2.97900030e-02, -6.93753868e-01,
5.78451116e-01, 2.06337502e-01, 2.89647600e-01],
[-0.00000000e+00, -0.00000000e+00, -2.36544652e-01,
2.94641819e-01, 3.12648820e-01, -1.12274948e-02,
-4.19760855e-01, 1.83772848e-01, 7.41205673e-01],
[-2.36544652e-01, -0.00000000e+00, -0.00000000e+00,
-5.97025305e-01, 6.33536675e-01, 2.68679525e-01,
2.81773098e-01, -9.82705016e-02, 1.58103880e-01],
[-0.00000000e+00, -2.36544652e-01, -0.00000000e+00,
-2.80736797e-02, 2.97905391e-02, 2.87983715e-01,
2.89697972e-02, 9.03711399e-01, -2.04701877e-01],
[-0.00000000e+00, -0.00000000e+00, -2.36544652e-01,
2.94635849e-01, -3.12654446e-01, 5.71869440e-01,
5.73721626e-01, -1.13019078e-01, 3.00863871e-01]])
"""
atommasses = np.asarray(atommasses)
natom = len(atommasses)
dof = 3 * natom
moments, axes, atomcoords = inertia(atommasses, atomcoords, align=False)
x = np.block(
[
np.ones(natom)[:, np.newaxis],
np.zeros(natom)[:, np.newaxis],
np.zeros(natom)[:, np.newaxis],
]
)
y = np.block(
[
np.zeros(natom)[:, np.newaxis],
np.ones(natom)[:, np.newaxis],
np.zeros(natom)[:, np.newaxis],
]
)
z = np.block(
[
np.zeros(natom)[:, np.newaxis],
np.zeros(natom)[:, np.newaxis],
np.ones(natom)[:, np.newaxis],
]
)
x *= np.sqrt(atommasses[:, np.newaxis])
y *= np.sqrt(atommasses[:, np.newaxis])
z *= np.sqrt(atommasses[:, np.newaxis])
D_trans = np.block([x.reshape(1, dof).T, y.reshape(1, dof).T, z.reshape(1, dof).T])
D_rot = np.array(
[
np.cross((atomcoords @ axes)[i], axes[:, j]) / np.sqrt(atommasses[i])
for i in range(natom)
for j in range(3)
]
)
D = np.block([D_trans, D_rot])
return np.linalg.qr(D, mode="complete")[0] | 833b18ecdb299d3183da24c3b9d40227e387a385 | 14,836 |
def as_java_array(gateway, java_type, iterable):
"""Creates a Java array from a Python iterable, using the given p4yj gateway"""
java_type = gateway.jvm.__getattr__(java_type)
lst = list(iterable)
arr = gateway.new_array(java_type, len(lst))
for i, e in enumerate(lst):
jobj = as_java_object(gateway, e)
arr[i] = jobj
return arr | d8a14a6506a0cbde6f09b4d071f6968da3e4d17d | 14,837 |
import numpy as np
import scipy.linalg
def match(a: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Finds the matrix R that minimizes the frobenius norm of RA - B, where
R is orthonormal.
Args:
a (np.ndarray[samples, features]): the first matrix to match
b (np.ndarray[samples, features]): the second matrix to match
Returns:
np.ndarray: the orthonormal matching matrix R
"""
tus.check_ndarrays(
a=(a, ('samples', 'features'), ('float32', 'float64')),
b=(b, (('samples', a.shape[0]), ('features', a.shape[1])), a.dtype)
)
m = b @ a.T
u, _, vh = scipy.linalg.svd(m)
return np.real(u @ vh) | 461f3f05ab1164bfbac3f9e8f6ccd5622791a6ff | 14,838 |
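A self-contained numerical check of the idea behind match (the orthogonal Procrustes problem): build B by rotating A with a known orthonormal R, then recover R from the SVD of B @ A.T.
import numpy as np
import scipy.linalg

rng = np.random.default_rng(0)
a = rng.normal(size=(5, 8))
r_true = scipy.linalg.qr(rng.normal(size=(5, 5)))[0]   # a random orthonormal matrix
b = r_true @ a
u, _, vh = scipy.linalg.svd(b @ a.T)
r_est = u @ vh
print(np.allclose(r_est @ a, b))    # True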
import time
import requests
import json
import pandas as pd
def macro_cons_silver_amount():
"""
    Holdings report of the world's largest silver ETF, iShares Silver Trust; data runs from 2006-04-29 to the present
:return: pandas.Series
2006-04-29 263651152
2006-05-02 263651152
2006-05-03 445408550
2006-05-04 555123947
2006-05-05 574713264
...
2019-10-17 Show All
2019-10-18 Show All
2019-10-21 Show All
2019-10-22 Show All
2019-10-23 Show All
"""
t = time.time()
res = requests.get(
JS_CONS_SLIVER_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["白银"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["总价值(美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "2",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 3]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "silver_amount"
url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
r = requests.get(url)
data_json = r.json()
append_temp_df = pd.DataFrame(data_json["values"]).T
append_temp_df.columns = [item["name"] for item in data_json["keys"]]
temp_append_df = append_temp_df["总价值"]
temp_append_df.name = "silver_amount"
temp_df = temp_df.reset_index()
temp_df["index"] = temp_df["index"].astype(str)
temp_df = temp_df.append(temp_append_df.reset_index())
temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
temp_df.index = pd.to_datetime(temp_df["index"])
del temp_df["index"]
temp_df = temp_df[temp_df != 'Show All']
temp_df.sort_index(inplace=True)
temp_df = temp_df.astype(float)
return temp_df | d60bac23a480c056d237dda6b16eb267b2f54ee5 | 14,839 |
import random
def shuffle(answers):
"""
Returns mixed answers and the index of the correct one,
assuming the first answer is the correct one.
"""
indices = list(range(len(answers)))
random.shuffle(indices)
correct = indices.index(0)
answers = [answers[i] for i in indices]
return answers, correct | e597b4aeb65fecf47f4564f2fddb4d76d484707a | 14,840 |
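A usage sketch, assuming shuffle above is in scope; the first answer passed in is the correct one.
answers, correct_idx = shuffle(["Paris", "Lyon", "Nice", "Lille"])
print(answers)                  # the four answers in shuffled order
print(answers[correct_idx])     # always 'Paris'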
from typing import Union
from pathlib import Path
def annotations_to_xml(annotations_df: pd.DataFrame, image_path: Union[str, Path],
write_file=True) -> str:
"""
Load annotations from dataframe (retinanet output format) and
convert them into xml format (e.g. RectLabel editor / LabelImg).
Args:
annotations_df (DataFrame): Format [xmin,ymin,xmax,ymax,label,...]
image_path: string/Path path to the file where these bboxes are found
write_file: Writes the xml at the same path as the image it describes.
Overwrites the existent file, if any.
Returns:
XML
<annotation>
<folder>unlabeled_imgs</folder>
<filename>autumn-forest-from-above-2210x1473.jpeg</filename>
<path>/work/trees/unlabeled_imgs/autumn-forest-from-above-2210x1473.jpeg</path>
<source>
<database>Unknown</database>
</source>
<size>
<width>2210</width>
<height>1473</height>
<depth>3</depth>
</size>
<segmented>0</segmented>
<object>
<name>tree</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>718</xmin>
<ymin>603</ymin>
<xmax>792</xmax>
<ymax>705</ymax>
</bndbox>
</object>
</annotation>
"""
image_path = Path(image_path)
out_dict = {
'folder': image_path.parent.name,
'filename': image_path.name,
'path': str(image_path),
'segmented': 0
}
xml_out = '<annotation>\n'
xml_out += dict2xml(out_dict, indent=" ") + '\n'
xml_out += "\n".join([__annotation_row_to_dict(row) for _, row in annotations_df.iterrows()])
xml_out += '\n</annotation>\n'
if write_file:
# annotations file should be near its image
file_path = image_path.parent / f'{image_path.stem}.xml'
with open(file_path, 'w+') as the_file:
the_file.write(xml_out)
return xml_out | 68ab235299da7026b77feb715e260f3e1749ec3b | 14,841 |
def depth(sequence, func=max, _depth=0):
"""
Find the nesting depth of a nested sequence
"""
if isinstance(sequence, dict):
sequence = list(sequence.values())
depth_list = [
depth(item, func=func, _depth=_depth + 1)
for item in sequence
if (isinstance(item, dict) or util_type.is_listlike(item))
]
if len(depth_list) > 0:
return func(depth_list)
else:
return _depth | 84b6e7ccaa0f7924fa4a775eca41edf8422222d0 | 14,842 |
def tei_email(elem_text):
"""
create TEI element <email> with given element text
"""
email = etree.Element("email")
email.text = elem_text
return email | cd3d6cf53f7ea5a29c4a02a4ea0d0a2d2144645c | 14,843 |
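A usage sketch, assuming tei_email above is in scope and that etree comes from lxml (xml.etree.ElementTree would behave the same here).
from lxml import etree

elem = tei_email("editor@example.org")
print(etree.tostring(elem))    # b'<email>editor@example.org</email>'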
def rebuild(request):
"""Rebuild ``XPI`` file. It can be provided as POST['location']
:returns: (JSON) contains one field - hashtag it is later used to download
the xpi using :meth:`xpi.views.check_download` and
:meth:`xpi.views.get_download`
"""
# validate entries
secret = request.POST.get('secret', None)
if not secret or secret != settings.AMO_SECRET_KEY:
log.error("Rebuild requested with an invalid key. Rejecting.")
return HttpResponseForbidden('Access denied')
options = request.POST.get('options', None)
location = request.POST.get('location', None)
addons = request.POST.get('addons', None)
upload = request.FILES.get('upload', None)
if not location and not upload and not addons:
log.error("Rebuild requested but files weren't specified. Rejecting.")
return HttpResponseBadRequest('Please provide XPI files to rebuild')
if location and upload:
log.error("Rebuild requested but location and upload provided."
"Rejecting")
return HttpResponseBadRequest('Please provide XPI files to rebuild')
# locate SDK source directory
sdk_version = request.POST.get('sdk_version', None)
if sdk_version:
sdk = get_object_or_404(SDK, version=sdk_version)
sdk_source_dir = sdk.get_source_dir()
else:
sdk_source_dir = (settings.REPACKAGE_SDK_SOURCE
or _get_latest_sdk_source_dir())
sdk_manifest = '%s/packages/%s/package.json' % (sdk_source_dir, 'addon-kit')
try:
handle = open(sdk_manifest)
except Exception, err:
log.critical("Problems loading SDK manifest\n%s" % str(err))
raise
else:
sdk_version = simplejson.loads(handle.read())['version']
handle.close()
pingback = request.POST.get('pingback', None)
priority = request.POST.get('priority', None)
post = request.POST.urlencode()
if priority and priority == 'high':
rebuild_task = tasks.high_rebuild
else:
rebuild_task = tasks.low_rebuild
response = {'status': 'success'}
errors = []
counter = 0
if location or upload:
hashtag = get_random_string(10)
if location:
log.debug('[%s] Single rebuild started for location (%s)' %
(hashtag, location))
else:
log.debug('[%s] Single rebuild started from upload' % hashtag)
filename = request.POST.get('filename', None)
try:
package_overrides = _get_package_overrides(request.POST,
sdk_version)
except BadManifestFieldException, err:
errors.append('[%s] %s' % (hashtag, str(err)))
else:
rebuild_task.delay(
location, upload, sdk_source_dir, hashtag,
package_overrides=package_overrides,
filename=filename, pingback=pingback,
post=post, options=options)
counter = counter + 1
if addons:
try:
addons = simplejson.loads(addons)
except Exception, err:
errors.append('[%s] %s' % (hashtag, str(err)))
else:
for addon in addons:
error = False
filename = addon.get('filename', None)
hashtag = get_random_string(10)
location = addon.get('location', None)
upload_name = addon.get('upload', None)
upload = None
if upload_name:
upload = request.FILES.get(upload_name, None)
if not (location or upload):
errors.append("[%s] Files not specified." % hashtag)
error = True
if location and upload:
errors.append(("[%s] Location and upload provided. "
"Rejecting") % hashtag)
error = True
try:
package_overrides = _get_package_overrides(addon,
sdk_version)
except Exception, err:
errors.append('[%s] %s' % (hashtag, str(err)))
error = True
if not error:
rebuild_task.delay(
location, upload, sdk_source_dir, hashtag,
package_overrides=package_overrides,
filename=filename, pingback=pingback,
post=post)
counter = counter + 1
if errors:
log.error("Errors reported when rebuilding")
response['status'] = 'some failures'
response['errors'] = ''
for e in errors:
response['errors'] = "%s%s\n" % (response['errors'], e)
log.error(" Error: %s" % e)
response['addons'] = counter
uuid = request.POST.get('uuid', 'no uuid')
log.info("%d addon(s) will be created, %d syntax errors, uuid: %s" %
(counter, len(errors), uuid))
return HttpResponse(simplejson.dumps(response),
mimetype='application/json') | 41d2ca3d73507a99dd790254606d11e743971ba9 | 14,844 |
from typing import List
from pathlib import Path
def get_requirements(req_file: str) -> List[str]:
"""
Extract requirements from provided file.
"""
req_path = Path(req_file)
requirements = req_path.read_text().split("\n") if req_path.exists() else []
return requirements | 3433cd117bbb0ced7ee8238e36f20c69e15c5260 | 14,845 |
from typing import List
import sys
def _generate_ngram_contexts(ngram: str) -> 'List[Acronym]':
"""
Generate a list of contextualized n-grams with a decreasing central n-gram and increasing \
lateral context.
:param ngram:
:return:
"""
tokens = ngram.split(" ")
ngram_size = len(tokens)
contexts = []
# Walk only until half and `max_diff` more.
for i in range(0, int((ngram_size + 1 + MAX_DIFF) / 2)):
# Allow up to `max_diff` difference in size.
for j in range(ngram_size - i + MAX_DIFF, ngram_size - i - MAX_DIFF - 1, -1):
# Do not allow empty acronym.
if i >= j:
break
# Do not walk past the n-gram.
if j > ngram_size:
continue
left = sys.intern(" ".join(tokens[0:i]))
right = sys.intern(" ".join(tokens[j:ngram_size]))
center = sys.intern(" ".join(tokens[i:j]))
contexts.append(Acronym(acronym=center, left_context=left,
right_context=right))
return contexts | 4beb0fb1f9170191e4118f0c7bb18d0a41e58a63 | 14,846 |
def get_gmail_account(slug):
"""
Return the details of the given account - just pass in the slug
e.g. get_account('testcity')
"""
service = get_gapps_client()
if not service:
return None
try:
return service.users().get(userKey=make_email(slug)).execute()
except HttpError:
return None | 959685e6f40b8333103e47f9ce8c50050ca95961 | 14,847 |
def unisolate_machine_command():
"""Undo isolation of a machine.
Returns:
(str, dict, dict). Human readable, context, raw response
"""
headers = ['ID', 'Type', 'Requestor', 'RequestorComment', 'Status', 'MachineID', 'ComputerDNSName']
machine_id = demisto.args().get('machine_id')
comment = demisto.args().get('comment')
machine_action_response = unisolate_machine_request(machine_id, comment)
machine_action_data = get_machine_action_data(machine_action_response)
entry_context = {
'MicrosoftATP.MachineAction(val.ID === obj.ID)': machine_action_data
}
human_readable = tableToMarkdown("The request to stop the isolation has been submitted successfully:",
machine_action_data, headers=headers, removeNull=True)
return human_readable, entry_context, machine_action_response | d981005753030a1be50a3c0ff40022241096ea2f | 14,848 |
def func(*listItems):
"""
    1. Iterate over all of the list arguments.
    2. Gather every element inside those lists into a single list.
    3. Sort that list and return the largest element.
"""
tmp_list=[]
for item in listItems:
if isinstance(item,list):
for i in item:
tmp_list.append(i)
tmp_list=list(filter(lambda k:isinstance(k,int),tmp_list))
tmp_list.sort(reverse=True)
max_value=tmp_list[0]
return max_value | adbef2744871f1d8f714cbf2a71d4321e3fb72f5 | 14,849 |
def factory(name: str):
"""Factory function to return a processing function for
Part of Speech tagging.
Parameters:
-----------
name : str
Identifier, e.g. 'spacy-de', 'stanza-de', 'flair-de', 'someweta-de',
'someweta-web-de'
Example:
--------
import nlptasks as nt
import nlptasks.pos
sequences = [['Die', 'Kuh', 'ist', 'bunt', '.']]
myfn = nt.pos.factory("spacy-de")
idseqs, TAGSET = myfn(sequences, maxlen=4)
"""
if name in ("spacy", "spacy-de"):
return spacy_de
elif name in ("stanza", "stanza-de"):
return stanza_de
elif name == "flair-de":
return flair_de
elif name in ("someweta", "someweta-de"):
return someweta_de
elif name in ("someweta-web", "someweta-web-de"):
return someweta_web_de
else:
raise Exception(f"Unknown PoS tagger: '{name}'") | 9166613ba98beeb56dcc5766217d951ff13f9b38 | 14,850 |
def backoff_handler(debug_only=True):
"""Backoff logging handler for when polling occurs.
Args:
details (dict): Backoff context containing the number of tries,
target function currently executing, kwargs, args, value,
and wait time.
"""
def _wrapped(details):
message = '[Backoff]: Calling \'{}\' again in {:f} seconds with {:d} tries so far'.format(
details['target'].__name__,
details['wait'],
details['tries']
)
if not debug_only:
LOGGER.info(message)
else:
LOGGER.debug(message)
return _wrapped | db76c6d857fd5df000b2bc3ff048d0d180f71a37 | 14,851 |
def align_dataframes(framea, frameb, fill_value = 0.0):
"""Use pandas DataFrame structure to align two-dimensional data
:param framea: First pandas dataframe to align
:param frameb: Other pandas dataframe to align
:param fill_value: default fill value (0.0 float)
return: tuple of aligned frames
"""
zeroframe = frameb.copy()
zeroframe[:] = fill_value
aligneda = framea.add(zeroframe, fill_value = fill_value)
zeroframe = framea.copy()
zeroframe[:] = fill_value
alignedb = frameb.add(zeroframe, fill_value = fill_value)
return aligneda, alignedb | 86a5e8c399ab47a10715af6c90d0901c2207597c | 14,852 |
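A usage sketch, assuming align_dataframes above is in scope: two frames with partly different indices and columns come back with identical shape, and cells present in only one frame are filled with 0.0.
import pandas as pd

a = pd.DataFrame({"x": [1.0, 2.0]}, index=["r1", "r2"])
b = pd.DataFrame({"x": [3.0], "y": [4.0]}, index=["r2"])
aligned_a, aligned_b = align_dataframes(a, b)
print(aligned_a.shape == aligned_b.shape)    # True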
import numpy as np
def flip_ud(img):
"""
Expects shape to be (num_examples, modalities, depth, width, height)
"""
return np.flip(img.copy(), 3) | f7a14641a89f5a170cb3d19b412acdbcbe3ac2f3 | 14,853 |
def data_context_service_interface_pointuuid_otsi_service_interface_point_spec_otsi_capability_get(uuid): # noqa: E501
"""data_context_service_interface_pointuuid_otsi_service_interface_point_spec_otsi_capability_get
returns tapi.photonic.media.OtsiCapabilityPac # noqa: E501
:param uuid: Id of service-interface-point
:type uuid: str
:rtype: TapiPhotonicMediaOtsiCapabilityPac
"""
return 'do some magic!' | 99b3ed0e843f0dd405cd0d0b618a4da92fbdcf55 | 14,854 |
def _get_trial_event_times(events, units, trial_cond_name):
"""
Get median event start times from all unit-trials from the specified "trial_cond_name" and "units" - aligned to GO CUE
:param events: list of events
"""
events = list(events) + ['go']
event_types, event_times = (psth.TrialCondition().get_trials(trial_cond_name)
* (experiment.TrialEvent & [{'trial_event_type': eve} for eve in events])
& units).fetch('trial_event_type', 'trial_event_time')
period_starts = [(event_type, np.nanmedian((event_times[event_types == event_type]
- event_times[event_types == 'go']).astype(float)))
for event_type in events[:-1] if len(event_times[event_types == event_type])]
present_events, event_starts = list(zip(*period_starts))
return np.array(present_events), np.array(event_starts) | c7198fdba392d7b5301109175408d3c0d95adbb9 | 14,855 |
import os
def is_path_exists_or_creatable(pathname=None):
"""
`True` if the passed pathname is a valid pathname for the current OS _and_
either currently exists or is hypothetically creatable; `False` otherwise.
This function is guaranteed to _never_ raise exceptions.
"""
try:
# To prevent "os" module calls from raising undesirable exceptions on
# invalid pathnames, is_pathname_valid() is explicitly called first.
return is_pathname_valid(pathname) and (os.path.exists(pathname) or is_path_creatable(pathname))
# Report failure on non-fatal filesystem complaints (e.g., connection
# timeouts, permissions issues) implying this path to be inaccessible. All
# other exceptions are unrelated fatal issues and should not be caught here.
except OSError:
return False | ca93215ab86fb9d68d21262a3b941f32bea3c474 | 14,856 |
from typing import Iterable
def select_region(selections, positions, region):
"""
selection in region from selections
"""
if not region:
return selections
region = list(region) + [None, None]
assert all([x is None or isinstance(x, Iterable) and len(x) == 2
for x in region]), 'region should be collections of x,y,z region'
output = []
for sel in selections:
for regi, reg in enumerate(region[:3]):
if reg:
if reg[0] <= positions[sel][regi] <= reg[1]:
output.append(sel)
return output | b9efc393b7d60773554130ded49d9dc9e00081e5 | 14,857 |
def summarize_center_and_dispersion(
analysis_layer,
summarize_type=["CentralFeature"],
ellipse_size=None,
weight_field=None,
group_field=None,
output_name=None,
context=None,
gis=None,
estimate=False,
future=False):
"""
.. image:: _static/images/summarize_center_and_dispersion/summarize_center_and_dispersion.png
The ``summarize_center_and_dispersion`` method finds central features and directional distributions. It can be used to answer questions such as:
* Where is the center?
* Which feature is the most accessible from all other features?
* How dispersed, compact, or integrated are the features?
    * Are there directional trends?
==================== =========================================================
**Argument** **Description**
-------------------- ---------------------------------------------------------
    analysis_layer       Required feature layer. The point, line, or polygon features to be analyzed. See :ref:`Feature Input<FeatureInput>`.
-------------------- ---------------------------------------------------------
summarize_type Required list of strings. The method with which to summarize the ``analysis_layer``.
Choice list: ["CentralFeature", "MeanCenter", "MedianCenter", "Ellipse"]
-------------------- ---------------------------------------------------------
ellipse_size Optional string. The size of the output ellipse in standard deviations.
Choice list: ['1 standard deviations', '2 standard deviations', '3 standard deviations']
The default ellipse size is '1 standard deviations'.
-------------------- ---------------------------------------------------------
weight_field Optional field. A numeric field in the ``analysis_layer`` to be used to
weight locations according to their relative importance.
-------------------- ---------------------------------------------------------
group_field Optional field. The field used to group features for separate directional
distribution calculations. The ``group_field`` can be of
integer, date, or string type.
-------------------- ---------------------------------------------------------
output_name Optional string. If provided, the method will create a feature service of the results.
You define the name of the service. If ``output_name`` is not supplied, the method will return a feature collection.
-------------------- ---------------------------------------------------------
context Optional string. Context contains additional settings that affect task execution. For ``summarize_center_and_dispersion``, there are two settings.
#. Extent (``extent``) - a bounding box that defines the analysis area. Only those features in the input layer that intersect the bounding box will be buffered.
#. Output Spatial Reference (``outSR``) - the output features will be projected into the output spatial reference.
-------------------- ---------------------------------------------------------
estimate Optional boolean. If True, the number of credits to run the operation will be returned.
-------------------- ---------------------------------------------------------
future Optional boolean. If True, the result will be a GPJob object and results will be returned asynchronously.
==================== =========================================================
:returns: list of items if ``output_name`` is supplied else, a Python dictionary with the following keys:
"central_feature_result_layer" : layer (FeatureCollection)
"mean_feature_result_layer" : layer (FeatureCollection)
"median_feature_result_layer" : layer (FeatureCollection)
"ellipse_feature_result_layer" : layer (FeatureCollection)
.. code-block:: python
# USAGE EXAMPLE: To find central features and mean center of earthquake over past months.
central_features = summarize_center_and_dispersion(analysis_layer=earthquakes,
summarize_type=["CentralFeature","MeanCenter"],
ellipse_size='2 standard deviations',
weight_field='mag',
group_field='magType',
output_name='find central features and mean center of earthquake over past months')
"""
gis = _arcgis.env.active_gis if gis is None else gis
return gis._tools.featureanalysis.summarize_center_and_dispersion(
analysis_layer,
summarize_type,
ellipse_size,
weight_field,
group_field,
output_name,
context,
estimate=estimate, future=future) | a1fc44cb1781bb11f39dda597fe884552ec07a99 | 14,858 |
def length_entropy(r: np.ndarray, minlen: int = 2) -> float:
"""Calculate entropy of diagonal lengths in RQ matrix.
Args:
r (np.ndarray[bool, bool]): Recurrence matrix
minlen (int): Minimum length of a line
Returns:
float: Shannon entropy of distribution of segment lengths
"""
dlens = diagonal_lengths(r, minlen)
counts = _dlen_counts(dlens, minlen, r.shape[0])
return entropy(counts) | fe20e36aade8bae5e8a2fc139ad887495818f336 | 14,859 |
def verify_scholarship_chair(user):
""" Verify user has Scholarship Chair permissions """
user_id = user.brother.id
if Position.objects.filter(title='President')[0].brother.id == user_id or \
Position.objects.filter(title='Scholarship Chair')[0].brother.id == user_id or \
debug:
return True
else:
return False | 53b1f0331e9af87cced904b2c26bef7a1d600cf2 | 14,860 |
import numpy as np
def rotate(posList, axis, angle):
"""Rotate the points about a given axis by a given angle."""
#normalize axis, turn angle into radians
axis = axis/np.linalg.norm(axis)
angle = np.deg2rad(angle)
#rotation matrix construction
ux, uy, uz = axis
sin, cos = np.sin(angle), np.cos(angle)
rotMat = np.array([[cos+ux*ux*(1.-cos), ux*uy*(1.-cos)-uz*sin, ux*uz*(1.-cos)+uy*sin],
[uy*ux*(1.-cos)+uz*sin, cos+uy*uy*(1.-cos), uy*uz*(1.-cos)-ux*sin],
[uz*ux*(1.-cos)-uy*sin, uz*uy*(1.-cos)+ux*sin, cos+uz*uz*(1.-cos)]])
#rotate points
return np.transpose(np.dot(rotMat,np.transpose(posList))) | 0719bf548f5d952e78f0b2551f2edcd9510b1eca | 14,861 |
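A usage sketch, assuming rotate above and numpy are in scope: rotating the x unit vector 90 degrees about the z axis gives the y unit vector.
import numpy as np

points = np.array([[1.0, 0.0, 0.0]])
print(np.round(rotate(points, np.array([0.0, 0.0, 1.0]), 90.0), 6))
# approximately [[0. 1. 0.]]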
def _make_frame_with_filename(tb, idx, filename):
"""Return a copy of an existing stack frame with a new filename."""
frame = tb[idx]
return FrameSummary(
filename,
frame.lineno,
frame.name,
frame.line) | c775b77c3c282ed598adc25996fb418a9b85529e | 14,862 |
import numpy as np
def median(X):
"""
Middle value after sorting all values by size, or mean of the two middle values.
Parameters
----------
X : np.array
Dataset. Should be a two-dimensional array.
Returns
-------
a: np.array
One-dimensional array that contains the median for each feature.
"""
return np.nanmedian(X, axis=0) | 232d1ce560c4030b01b048cb9087d5e8c49b39ec | 14,863 |
def _filter_none_values(d: dict):
"""
Filter out the key-value pairs with `None` as value.
Arguments:
d
dictionary
Returns:
filtered dictionary.
"""
return {key: value for (key, value) in d.items() if value is not None} | bed2629e4fa96a391e15b043aa3a0d64c75d6ed0 | 14,864 |
def new_project(request):
"""
if this is a new project, call crud_project without a slug and
with action set to New
"""
return crud_project(request, slug=None, action="New") | fc224a23fb2ecc39fce20a927c57be0ff74ed9d1 | 14,865 |
def get_Simon_instance(simon_instance):
"""Return an instance of the Simon family as a `Cipher`."""
if simon_instance == SimonInstance.simon_32_64:
default_rounds = 32
n = 16
m = 4
z = "11111010001001010110000111001101111101000100101011000011100110"
elif simon_instance == SimonInstance.simon_48_96:
default_rounds = 36
n = 24
m = 4
z = "10001110111110010011000010110101000111011111001001100001011010"
elif simon_instance == SimonInstance.simon_64_128:
default_rounds = 44
n = 32
m = 4
z = "11011011101011000110010111100000010010001010011100110100001111"
else:
raise ValueError("invalid instance of Simon")
class SimonKeySchedule(RoundBasedFunction):
"""Key schedule function."""
num_rounds = default_rounds
input_widths = [n for _ in range(m)]
output_widths = [n for _ in range(default_rounds)]
@classmethod
def set_num_rounds(cls, new_num_rounds):
cls.num_rounds = new_num_rounds
cls.input_widths = [n for _ in range(min(m, new_num_rounds))]
cls.output_widths = [n for _ in range(new_num_rounds)]
@classmethod
def eval(cls, *master_key):
if cls.num_rounds <= m:
return list(reversed(master_key))[:cls.num_rounds]
k = [None for _ in range(cls.num_rounds)]
k[:m] = list(reversed(master_key))
for i in range(m, cls.num_rounds):
tmp = RotateRight(k[i - 1], 3)
if m == 4:
tmp ^= k[i - 3]
tmp ^= RotateRight(tmp, 1)
k[i] = ~k[i - m] ^ tmp ^ int(z[(i - m) % 62]) ^ 3
return k
class SimonEncryption(Encryption, RoundBasedFunction):
"""Encryption function."""
num_rounds = default_rounds
input_widths = [n, n]
output_widths = [n, n]
round_keys = None
@classmethod
def set_num_rounds(cls, new_num_rounds):
cls.num_rounds = new_num_rounds
@classmethod
def eval(cls, x, y):
for i in range(cls.num_rounds):
x, y = (y ^ SimonRF(x) ^ cls.round_keys[i], x)
cls.add_round_outputs(x, y)
return x, y
class SimonCipher(Cipher):
key_schedule = SimonKeySchedule
encryption = SimonEncryption
_simon_instance = simon_instance
@classmethod
def set_num_rounds(cls, new_num_rounds):
cls.key_schedule.set_num_rounds(new_num_rounds)
cls.encryption.set_num_rounds(new_num_rounds)
@classmethod
def test(cls):
old_num_rounds = cls.num_rounds
cls.set_num_rounds(default_rounds)
if cls._simon_instance == SimonInstance.simon_32_64:
plaintext = (0x6565, 0x6877)
key = (0x1918, 0x1110, 0x0908, 0x0100)
assert cls(plaintext, key) == (0xc69b, 0xe9bb)
elif cls._simon_instance == SimonInstance.simon_48_96:
plaintext = (0x726963, 0x20646e)
key = (0x1a1918, 0x121110, 0x0a0908, 0x020100)
assert cls(plaintext, key) == (0x6e06a5, 0xacf156)
elif cls._simon_instance == SimonInstance.simon_64_128:
plaintext = (0x656b696c, 0x20646e75)
key = (0x1b1a1918, 0x13121110, 0x0b0a0908, 0x03020100)
assert cls(plaintext, key) == (0x44c8fc20, 0xb9dfa07a)
else:
raise ValueError("invalid instance of Simon")
cls.set_num_rounds(old_num_rounds)
return SimonCipher | 41ce1cdfdb58b15af8167f5e0d03fcd0beb94c80 | 14,866 |
import os
import struct
import numpy as np
def load_mnist(path, kind="train"):
"""
Documentation:
---
Description:
Load MNIST images and labels from unzipped source files.
---
Parameters:
kind : str
Used to identify training data vs. validation data. Pass
"train" to load training data, and "t10k" to load validation
data.
---
Returns
images : Numpy array
Numpy array containing all images in dataset. Has shape N by
784, where N is the number of samples and 784 is the number
of pixels.
targets : Numpy array
Numpy array containing all targets associated with images.
Has shape N by 1, where N is the number of samples.
"""
labels_path = os.path.join(path,
"{}-labels-idx1-ubyte".format(kind))
images_path = os.path.join(path,
"{}-images-idx3-ubyte".format(kind))
with open(labels_path, "rb") as lbpath:
magic, n = struct.unpack(">II",
lbpath.read(8))
targets = np.fromfile(lbpath,
dtype=np.uint8)
with open(images_path, "rb") as imgpath:
magic, num, rows, cols = struct.unpack(">IIII",
imgpath.read(16))
images = np.fromfile(imgpath,
dtype=np.uint8).reshape(
len(targets), 784)
return images, targets | 0e67fe5930e652a7a153dfe9f1c3e1ac1a4eb51c | 14,867 |
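A usage sketch under the assumption that the four unzipped MNIST files sit in a local mnist/ directory (the path is hypothetical):
X_train, y_train = load_mnist("mnist", kind="train")
X_valid, y_valid = load_mnist("mnist", kind="t10k")
print(X_train.shape, y_train.shape)   # (60000, 784) (60000,)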
from math import floor
def clamp(val: float) -> int:
"""Clamp a number to that expected by a reasonable RGB component
This ensures that we don't have negative values, or values exceeding one byte
    Additionally, all number inputs are floored to an integer
Args:
val (float): Raw float value to clamp
Returns:
int: Clamped R/G/B value
"""
return floor(min(max(0, val), 255)) | b908afd06f8e5bf9b98f2729424e0b007c62a18a | 14,868 |
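A few illustrative calls:
print(clamp(-12.3))   # 0
print(clamp(99.9))    # 99
print(clamp(300.0))   # 255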
import numpy as np
import scipy.sparse.csgraph
def componental_mfpt(trans: np.ndarray, **kwargs) -> np.ndarray:
"""Compute Markov mean first passage times per connected component of the chain."""
n_comps, comp_labels = scipy.sparse.csgraph.connected_components(
trans, **kwargs
)
hier_trans = transition_matrix(trans)
absorbing = np.isclose(np.diag(hier_trans), 1)
if n_comps == 1 and not absorbing.any():
print('shortcut')
return mfpt(hier_trans)
else:
print('longrun')
times = np.full_like(hier_trans, fill_value=np.inf)
# for each autonomous subsystem
for comp_i in range(n_comps):
is_comp = (comp_labels == comp_i)
absorbing_i = np.flatnonzero(absorbing & is_comp)
nonabsorbing_i = np.flatnonzero(~absorbing & is_comp)
times[nonabsorbing_i[:, None], nonabsorbing_i] = mfpt(
hier_trans[nonabsorbing_i[:, None], nonabsorbing_i]
)
times[absorbing_i, absorbing_i] = 1
return times | 76c9ade340668e5f564b874bc808170b2d0903cb | 14,869 |
import os
def get_snippet(path):
"""Get snippet source string"""
current_file_dir = os.path.dirname(__file__)
absolute_path = os.path.join(current_file_dir, path)
with open(absolute_path) as src:
return src.read() | e101a25c61313d0531e0c38e27b120d56fcd8a47 | 14,870 |
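A usage sketch with a hypothetical path relative to the calling module:
src = get_snippet("snippets/hello.py")   # hypothetical file under this module's directory
print(src)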
from numpy import asanyarray
def atleast_1d(*arys):
"""
Convert inputs to arrays with at least one dimension.
Scalar inputs are converted to 1-dimensional arrays, whilst
higher-dimensional inputs are preserved.
Parameters
----------
array1, array2, ... : array_like
One or more input arrays.
Returns
-------
ret : ndarray
An array, or sequence of arrays, each with ``a.ndim >= 1``.
Copies are made only if necessary.
See Also
--------
atleast_2d, atleast_3d
Examples
--------
>>> np.atleast_1d(1.0)
array([ 1.])
>>> x = np.arange(9.0).reshape(3,3)
>>> np.atleast_1d(x)
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.]])
>>> np.atleast_1d(x) is x
True
>>> np.atleast_1d(1, [3, 4])
[array([1]), array([3, 4])]
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if len(ary.shape) == 0 :
result = ary.reshape(1)
else :
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res | 440782c7c5a5b231425cc1c1282110e983dd8dc2 | 14,871 |
def lines2str(lines, sep = "\n"):
"""Merge a list of lines into a single string
Args:
lines (list, str, other): a list of lines or a single object
sep (str, optional): a separator
Returns:
str: a single string which is either a concatenated lines (using
a custom or the default separator) or a str(lines) result
"""
if isinstance(lines, str):
return lines
if hasattr(lines, '__iter__'):
return sep.join(lines)
return str(lines) | a9209bd8eda92f42a287725aaeccfcb35dab24cd | 14,872 |
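A few illustrative calls:
print(lines2str(["a", "b", "c"]))        # returns "a\nb\nc"
print(lines2str(["a", "b"], sep=", "))   # returns "a, b"
print(lines2str(42))                     # returns "42"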
import os
import subprocess
def tail(fname, n=10, with_tail='tail'):
"""Get the last lines in a file.
Parameters
----------
fname : str
File name.
n : int, optional
Number of lines to get (default is 10).
with_tail : str, optional
The 'tail' command to use (default is `tail`).
Returns
-------
str
The last lines in file, or None on error.
"""
fname = os.path.abspath(fname)
try:
lines = subprocess.check_output(
[with_tail, '-n{n}'.format(n=n), fname])
except subprocess.CalledProcessError:
raise RuntimeError('Unable to get status. Please try again.')
except Exception:
raise
else:
return lines.strip() | 6fe1f0cae399653401dec8226e2e9f3c271bdc16 | 14,873 |
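A usage sketch (hypothetical log path; requires a POSIX tail on PATH; note that check_output returns bytes under Python 3):
last_lines = tail("/var/log/syslog", n=5)
print(last_lines)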
def evaluate(board):
"""
Evaluates chess board
input parameter(s):
board --> The chess board to be evaluated
return parameter(s):
score --> The board evaluation
"""
score = 0
for i in range(len(board)):
for j in range(len(board[i])):
# Add piece value and it's current square value (A Queen on d4 will be worth 900 + 5)
piece_value = piece_values[board[i][j]] + \
square_values[board[i][j]][i][j]
# Add piece value to overall board score
score += piece_value
return score | 6b02f085ab47d241f7639143d82570e97891975a | 14,874 |
def get_atom(value):
"""atom = [CFWS] 1*atext [CFWS]
An atom could be an rfc2047 encoded word.
"""
atom = Atom()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
if value and value[0] in ATOM_ENDS:
raise errors.HeaderParseError(
"expected atom but found '{}'".format(value))
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: need to figure out how to register defects when
# appropriate here.
token, value = get_atext(value)
else:
token, value = get_atext(value)
atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
return atom, value | f7d93daabfc96138e79443eb2e5c7e7a9b28fbbc | 14,875 |
def create_page():
"""新增页面"""
tags = dbutils.get_tags()
return render_template('edit.html', title='新建', edit=False, tags=tags) | 48c14aabc76ff3c4886b2ff7f1340035936d81ce | 14,876 |
def calculate_class_weights():
"""
:return: class-wise true-label-area / false-label-area as a dictionary
"""
df = collect_stats()
df = df.fillna(0)
df = df.pivot(index = 'Class', columns = 'ImageId', values = 'TotalArea')
df = df.sum(axis=1)
df = df / (2500. - df)
return df.to_dict() | 42d006daee27d1400d76e9233e64e4d09683573c | 14,877 |
def get_data_layer(roidb, num_classes):
"""return a data layer."""
if cfg.TRAIN.HAS_RPN:
if cfg.IS_MULTISCALE:
layer = GtDataLayer(roidb)
else:
layer = RoIDataLayer(roidb, num_classes)
else:
layer = RoIDataLayer(roidb, num_classes)
return layer | 32035fc837b402949a5fb75af0ad5dbe26a2e129 | 14,878 |
def split_on_first_brace(input,begin_brace = "{",end_brace = "}",error_replacement="brace_error"):
"""
input: string with {Something1} Something2
output: tuple (Something1,Something2)
"""
if error_replacement=="chapter_error":
print(input[:20])
input = remove_empty_at_begin(input)
if len(input) == 0:
#raise RuntimeError("hi")
print("first brace empty string ERROR")
return error_replacement,input
if input[0] != begin_brace:
print("first brace NOT Brace ERROR")
return error_replacement,input
brace_count = 0
out1 = ""
for elem in input:
out1 += elem
if elem == begin_brace:
brace_count = brace_count + 1
if elem == end_brace:
brace_count = brace_count - 1
if brace_count == 0:
break
out2 = input[len(out1):]
out1 = out1[1:-1]
return out1, out2 | 201f6789f9db65aa98b857c923e3ed0484aaea89 | 14,879 |
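A usage sketch; the snippet relies on a remove_empty_at_begin helper that is not shown, so a plain lstrip stand-in is assumed here:
def remove_empty_at_begin(s):
    # assumed helper: drop leading whitespace
    return s.lstrip()

body, rest = split_on_first_brace("{hello {nested}} tail")
print(body)   # hello {nested}
print(rest)   # " tail"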
import os
import os.path as osp
import re
def find_files(folder_path: str, pattern: str, maxdepth: int = 1):
"""
Read the absolute path of files under a folder
TODO: make it recursive
"""
assert isinstance(folder_path, str), 'folder path must be a string'
assert maxdepth >= 0
if maxdepth == 0:
return []
res = []
for file_name in os.listdir(folder_path):
if file_name.startswith('__') or file_name.startswith('.'):
continue
abs_path = osp.join(folder_path, file_name)
if osp.isfile(abs_path):
if re.search(pattern, file_name):
res.append(abs_path)
elif osp.isdir(abs_path):
sub_list = find_files(abs_path, pattern, maxdepth-1)
res += sub_list
return res | c8c3b59281b183d69b829f164504acc520c89239 | 14,880 |
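A usage sketch (hypothetical directory and pattern): collect test modules up to two levels deep.
tests = find_files("src", r"^test_.*\.py$", maxdepth=2)
print(tests)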
import warnings
def check_count(value, total_count, dimension_type):
"""check the value for count."""
value = validate(value, "count", int)
if value > total_count:
raise ValueError(
f"Cannot set the count, {value}, more than the number of coordinates, "
f"{total_count}, for the {dimension_type} dimensions."
)
if value < total_count:
warnings.warn(f"The number of labels, {total_count}, are truncated to {value}.")
return value | aed0b31e041c3c8ca791533697c2ad9e292a8fcc | 14,881 |
import json
def request_certificate(request):
"""Request the on-demand creation of a certificate for some user, course.
A request doesn't imply a guarantee that such a creation will take place.
We intentionally use the same machinery as is used for doing certification
at the end of a course run, so that we can be sure users get graded and
then if and only if they pass, do they get a certificate issued.
"""
if request.method == "POST":
if request.user.is_authenticated():
# Enter your api key here
xqci = CertificateGeneration(
api_key=settings.APPSEMBLER_FEATURES['ACCREDIBLE_API_KEY']
)
username = request.user.username
student = User.objects.get(username=username)
course_key = CourseKey.from_string(
request.POST.get('course_id')
)
course = get_course(course_key)
status = certificate_status_for_student(
student,
course_key)['status']
if status in [CertificateStatuses.unavailable, CertificateStatuses.notpassing, CertificateStatuses.error]:
logger.info(
'Grading and certification requested for user {} in course {} via /request_certificate call'.format(username, course_key))
status = xqci.add_cert(student, course_key, course=course)
return HttpResponse(
json.dumps(
{'add_status': status}
), content_type='application/json')
return HttpResponse(
json.dumps(
{'add_status': 'ERRORANONYMOUSUSER'}
), content_type='application/json') | 73605a4d02d515656ddbd4cb63e1c810d65f5f2e | 14,882 |
def get_useable_checkers():
"""
列出可用插件列表
:return:
"""
useable_checkers = list()
for (checker_name, checker_instance) in CHECKER_INSTANCE_DICT.items():
if checker_instance.useable:
useable_checkers.append(checker_instance)
return useable_checkers | 9548c4423f2176081e59957a823afb986f134c7a | 14,883 |
def get_training_roidb(imdb):
"""Returns a roidb (Region of Interest database) for use in training."""
    if cfg.TRAIN.USE_FLIPPED:
        print('Appending horizontally-flipped training examples...')
        imdb.append_flipped_images()
        print('done')
    print('Preparing training data...')
    wrdl_roidb.prepare_roidb(imdb)
    print('done')
return imdb.roidb | adec6258d6ffa810aef475ec5257ef92a762f1fa | 14,884 |
def _used_in_calls(schedule_name: str, schedule: ScheduleBlock) -> bool:
"""Recursively find if the schedule calls a schedule with name ``schedule_name``.
Args:
schedule_name: The name of the callee to identify.
schedule: The schedule to parse.
Returns:
True if ``schedule``calls a ``ScheduleBlock`` with name ``schedule_name``.
"""
blocks_have_schedule = False
for block in schedule.blocks:
if isinstance(block, Call):
if block.subroutine.name == schedule_name:
return True
else:
blocks_have_schedule = blocks_have_schedule or _used_in_calls(
schedule_name, block.subroutine
)
if isinstance(block, ScheduleBlock):
blocks_have_schedule = blocks_have_schedule or _used_in_calls(schedule_name, block)
return blocks_have_schedule | 9d6ff0b3a415047252ef2148aa6e59e229531ef7 | 14,885 |
def font_size_splitter(font_map):
"""
    Split fonts into 4 categories (small, medium, large, xlarge) by the maximum letter length in each font.
:param font_map: input fontmap
:type font_map : dict
:return: splitted fonts as dict
"""
small_font = []
medium_font = []
large_font = []
xlarge_font = []
fonts = set(font_map.keys()) - set(RANDOM_FILTERED_FONTS)
for font in fonts:
length = max(map(len, font_map[font][0].values()))
if length <= FONT_SMALL_THRESHOLD:
small_font.append(font)
elif length > FONT_SMALL_THRESHOLD and length <= FONT_MEDIUM_THRESHOLD:
medium_font.append(font)
elif length > FONT_MEDIUM_THRESHOLD and length <= FONT_LARGE_THRESHOLD:
large_font.append(font)
else:
xlarge_font.append(font)
return {
"small_list": small_font,
"medium_list": medium_font,
"large_list": large_font,
"xlarge_list": xlarge_font} | d047e182df8d9015997c2debd6269012cb779df5 | 14,886 |
from typing import Optional
from typing import Collection
from typing import Union
from typing import List
import pandas
def get_candidate_set_size(
mapped_triples: MappedTriples,
restrict_entities_to: Optional[Collection[int]] = None,
restrict_relations_to: Optional[Collection[int]] = None,
additional_filter_triples: Union[None, MappedTriples, List[MappedTriples]] = None,
num_entities: Optional[int] = None,
) -> pandas.DataFrame:
"""
Calculate the candidate set sizes for head/tail prediction for the given triples.
:param mapped_triples: shape: (n, 3)
the evaluation triples
:param restrict_entities_to:
The entity IDs of interest. If None, defaults to all entities. cf. :func:`restrict_triples`.
:param restrict_relations_to:
The relations IDs of interest. If None, defaults to all relations. cf. :func:`restrict_triples`.
:param additional_filter_triples: shape: (n, 3)
additional filter triples besides the evaluation triples themselves. cf. `_prepare_filter_triples`.
:param num_entities:
the number of entities. If not given, this number is inferred from all triples
:return: columns: "index" | "head" | "relation" | "tail" | "head_candidates" | "tail_candidates"
a dataframe of all evaluation triples, with the number of head and tail candidates
"""
# optionally restrict triples (nop if no restriction)
mapped_triples = restrict_triples(
mapped_triples=mapped_triples,
entities=restrict_entities_to,
relations=restrict_relations_to,
)
# evaluation triples as dataframe
columns = [LABEL_HEAD, LABEL_RELATION, LABEL_TAIL]
df_eval = pandas.DataFrame(
data=mapped_triples.numpy(),
columns=columns,
).reset_index()
# determine filter triples
filter_triples = prepare_filter_triples(
mapped_triples=mapped_triples,
additional_filter_triples=additional_filter_triples,
)
# infer num_entities if not given
if restrict_entities_to:
num_entities = len(restrict_entities_to)
else:
# TODO: unique, or max ID + 1?
num_entities = num_entities or filter_triples[:, [0, 2]].view(-1).unique().numel()
# optionally restrict triples
filter_triples = restrict_triples(
mapped_triples=filter_triples,
entities=restrict_entities_to,
relations=restrict_relations_to,
)
df_filter = pandas.DataFrame(
data=filter_triples.numpy(),
columns=columns,
)
# compute candidate set sizes for different targets
# TODO: extend to relations?
for target in [LABEL_HEAD, LABEL_TAIL]:
total = num_entities
group_keys = [c for c in columns if c != target]
df_count = df_filter.groupby(by=group_keys).agg({target: "count"})
column = f"{target}_candidates"
df_count[column] = total - df_count[target]
df_count = df_count.drop(columns=target)
df_eval = pandas.merge(df_eval, df_count, on=group_keys, how="left")
df_eval[column] = df_eval[column].fillna(value=total)
return df_eval | ccef15c8dd42e63405bfcaa45231f8efa04e0fd5 | 14,887 |
def dm2skin_normalizeWeightsConstraint(x):
"""Constraint used in optimization that ensures
the weights in the solution sum to 1"""
return sum(x) - 1.0 | 79024cb70fd6cbc3c31b0821baa1bcfb29317043 | 14,888 |
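A sketch of how such an equality constraint is typically consumed by scipy.optimize.minimize (the toy objective and dimensions are assumptions, not the original skinning setup):
import numpy as np
from scipy.optimize import minimize

target = np.array([0.7, 0.2, 0.4])                     # toy per-joint influences
res = minimize(lambda x: np.sum((x - target) ** 2),    # stay close to the raw influences
               x0=np.full(3, 1.0 / 3.0),
               bounds=[(0.0, 1.0)] * 3,
               constraints={"type": "eq",
                            "fun": dm2skin_normalizeWeightsConstraint})
print(res.x, res.x.sum())                              # weights summing to ~1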
import os
def _load_bitmap(filename):
"""
Load a bitmap file from the backends/images subdirectory in which the
matplotlib library is installed. The filename parameter should not
contain any path information as this is determined automatically.
Returns a wx.Bitmap object
"""
basedir = os.path.join(rcParams['datapath'],'images')
bmpFilename = os.path.normpath(os.path.join(basedir, filename))
if not os.path.exists(bmpFilename):
raise IOError('Could not find bitmap file "%s"; dying'%bmpFilename)
bmp = wx.Bitmap(bmpFilename)
return bmp | 9d6d53aa7e370ac064e7db20c7e825f8cc004177 | 14,889 |
import requests
def get_auth():
"""
POST request to users/login, returns auth token
"""
try:
url_user_login = f"https://{url_core_data}/users/login"
json = {
"username": creds_name,
"password": creds_pw
}
headers = {
"Accept": "application/json"
}
r = requests.post(url_user_login, headers=headers, json=json, verify=False)
response = r.json()
code = r.status_code
token = response["token"]
# print(f"RESPONSE: {response}")
# print(f"STATUS_CODE: {code}")
# print(f"TOKEN: {token}")
return token
except Exception as e:
auth_err_msg = f"Error authenticating with the DIVA API: \n\
{e}"
logger.error(auth_err_msg) | 86aa225a8c856dd46ece14d982a24b32a52a87ef | 14,890 |
def vector_between_points(P, Q):
""" vector between initial point P and terminal point Q """
    return vector_subtract(Q, P) | de86c29dfe8c75b31040942d7195b8b92d731106 | 14,891
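A usage sketch; vector_subtract is not shown, so a component-wise stand-in is assumed:
def vector_subtract(a, b):
    # assumed helper: component-wise a - b
    return [ai - bi for ai, bi in zip(a, b)]

print(vector_between_points([1, 2], [4, 6]))   # [3, 4]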
import time
def before_train(loaded_train_model, train_model, train_sess, global_step, hparams, log_f):
"""Misc tasks to do before training."""
stats = init_stats()
info = {"train_ppl": 0.0, "speed": 0.0, "avg_step_time": 0.0,
"avg_grad_norm": 0.0, "avg_train_sel": 0.0,
"learning_rate": loaded_train_model.learning_rate.eval(
session=train_sess)}
start_train_time = time.time()
print_out("# Start step %d, lr %g, %s" % (global_step, info["learning_rate"], time.ctime()), log_f)
# Initialize all of the iterators
skip_count = hparams.qe_batch_size * hparams.epoch_step
print_out("# Init train iterator, skipping %d elements" % skip_count)
train_sess.run(
train_model.iterator.initializer,
feed_dict={train_model.skip_count_placeholder: skip_count})
return stats, info, start_train_time | d333808bec0771e74d709f859b7423b9e561703f | 14,892 |
import os
def reg_file_comp(ref_file, comp_file):
"""Compare the reference file 'ref_file' with 'comp_file'. The
order of these two files matter. The ref_file MUST be given
first. Only values specified by reg_write() are compared. All
other lines are ignored. Floating point values are compared based
on rel_tol and abs_tol"""
all_ref_lines = []
ref_values = []
comp_values = []
try:
f = open(ref_file, "r")
except IOError:
print("File %s was not found. Cannot do comparison." % (ref_file))
return REG_ERROR
for line in f.readlines():
all_ref_lines.append(line)
if line[0:6] == "@value":
ref_values.append(line)
# end for
f.close()
try:
f = open(comp_file, "r")
except IOError:
print("File %s was not found. Cannot do comparison." % (comp_file))
return REG_ERROR
for line in f.readlines():
if line[0:6] == "@value":
comp_values.append(line)
# end for
f.close()
# Copy the comp_file to compe_file.orig
os.system("cp %s %s.orig" % (comp_file, comp_file))
# We must check that we have the same number of @value's to compare:
if len(ref_values) != len(comp_values):
print("Error: number of @value lines in file not the same!")
return REG_FILES_DO_NOT_MATCH
# end if
# Open the (new) comp_file:
f = open(comp_file, "w")
# Loop over all the ref_lines, for value lines, do the
# comparison. If comparison is ok, write the ref line, otherwise
# write orig line.
j = 0
res = REG_FILES_MATCH
for i in range(len(all_ref_lines)):
line = all_ref_lines[i]
if line[0:6] == "@value":
if _reg_str_comp(line, comp_values[j]) is False:
f.write(comp_values[j])
res = REG_FILES_DO_NOT_MATCH
else:
f.write(line)
# end if
j += 1
else:
f.write(line)
# end if
# end for
f.close()
return res | 4b3999625880cbf30de5cc4ec062ce700866a539 | 14,893 |
def methodInDB(method_name, dict_link, interface_db_cursor): #checks the database to see if the method exists already
"""
Method used to check the database to see if a method exists in the database
returns a list [Boolean True/False of if the method exists in the db, dictionary link/ID]
"""
crsr = interface_db_cursor
#splitting method into parts
if "::" in method_name:
method = method_name.split('::')
cn = method[0].strip()
mn = '::'.join(method[1:]).strip()
else:
cn = "Unknown"
mn = method_name
if dict_link == '': #dict link should only be empty on the initial call
# search for any method with the same name and class
crsr.execute("SELECT class_name, method_name, method_text, dict_link FROM methods WHERE class_name = ? AND method_name = ?", (cn, mn))
res = crsr.fetchall()
if len(res) == 0: #method not in table
return [False, '']
else: # found something, verify it is right
if len(res) == 1:
print('Method found in database.')
if res[0][0] == 'Unknown':
print(res[0][1])
else:
print('::'.join(res[0][0:2]))
print(res[0][2])
print('Is this the correct method? (Y/N)') #prompt the user to confirm that this is the right method
k = input()
k = k.strip()
while( k not in ['N', 'n', 'Y', 'y' ] ):
print('Invalid input, try again')
k = input()
if k == 'Y' or k == 'y':
return [True, res[0][3]]
elif k == 'N' or k == 'n':
return [False, '']
elif len(res) > 1:
print("\nMethod found in database")
count = 1
for r in res:
tmp = str(count) + ': '
print(tmp)
if r[0] == 'Unknown':
print(r[1])
else:
print('::'.join(r[0:2]))
print(r[2],'\n')
count += 1
print('Which one of these is the correct method?\nPut 0 for none of them.') #if there are multiple versions of the method in the db
# prompt the user to select which method is the right method, prints the method text
k = input()
try: k = int(k)
except: k = -1
while( int(k) > len(res) or int(k) < 0 ):
print("Invalid input: try again please")
k = input()
try: k = int(k)
except: k = -1
if k == 0:
return [False, '']
elif k > 0 and k <= len(res):
return [True, res[k-1][3]]
else: #there is a dict_link, can check for exact, usually what happens
crsr.execute("SELECT class_name, method_name FROM methods WHERE class_name = ? AND method_name = ? AND dict_link = ?", (cn, mn, dict_link))
#simple sql select
res = crsr.fetchall()
if len(res) == 0: #method not in table
return [False, dict_link]
elif len(res) > 0: # we found something
return [True, dict_link] | 8dc3ecc256b696a06906e63a461c241ff429e8ae | 14,894 |
import numpy as np
def dict_to_image(screen):
""" Takes a dict of room locations and their block type output by RunGame.
Renders the current state of the game screen.
"""
picture = np.zeros((51, 51))
# Color tiles according to what they represent on screen:.
for tile in screen:
pos_x, pos_y = tile
if pos_x < 51 and pos_y < 51:
if screen[tile] == 46:
picture[pos_y][pos_x] = 0;
elif screen[tile] == 35:
picture[pos_y][pos_x] = 240;
else:
picture[pos_y][pos_x] = 150
return picture | 5657d3984a035d11854ef2b1f6dff642a00032a1 | 14,895 |
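A small usage sketch (illustrative tile codes: 35 is '#', 46 is '.', anything else is treated as 'other'):
screen = {(0, 0): 35, (1, 0): 46, (2, 0): 64}
img = dict_to_image(screen)
print(img.shape)    # (51, 51)
print(img[0, :3])   # [240.   0. 150.]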
from .model_store import download_model
import os
def get_mobilenet(version,
width_scale,
model_name=None,
pretrained=False,
root=os.path.join('~', '.keras', 'models'),
**kwargs):
"""
Create MobileNet or FD-MobileNet model with specific parameters.
Parameters:
----------
version : str
Version of SqueezeNet ('orig' or 'fd').
width_scale : float
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.keras/models'
Location for keeping the model parameters.
"""
if version == 'orig':
channels = [[32], [64], [128, 128], [256, 256], [512, 512, 512, 512, 512, 512], [1024, 1024]]
first_stage_stride = False
elif version == 'fd':
channels = [[32], [64], [128, 128], [256, 256], [512, 512, 512, 512, 512, 1024]]
first_stage_stride = True
else:
raise ValueError("Unsupported MobileNet version {}".format(version))
if width_scale != 1.0:
channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
net = mobilenet(
channels=channels,
first_stage_stride=first_stage_stride,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net | da4f81d06bc041b63b47f096eed0c41dc7c39390 | 14,896 |
def can_fuse_to(wallet):
"""We can only fuse to wallets that are p2pkh with HD generation. We do
*not* need the private keys."""
return isinstance(wallet, Standard_Wallet) | 1ee8693d7457591a64057a4913d9739f96319e7a | 14,897 |
import tensorflow as tf
def _build_context(hps, encoder_outputs):
"""Compute feature representations for attention/copy.
Args:
hps: hyperparameters.
encoder_outputs: outputs by the encoder RNN.
Returns:
Feature representation of [batch_size, seq_len, decoder_dim]
"""
with tf.variable_scope("memory_context"):
context = tf.layers.dense(
encoder_outputs,
units=hps.decoder_dim,
activation=None,
use_bias=False,
kernel_initializer=tf.contrib.layers.xavier_initializer(),
name="memory_projector")
return context | 6ce8a9a7845376f1804610d371d23b32fa9991f7 | 14,898 |
from typing import Union
def rf_local_divide(left_tile_col: Column_type, rhs: Union[float, int, Column_type]) -> Column:
"""Divide two Tiles cell-wise, or divide a Tile's cell values by a scalar"""
if isinstance(rhs, (float, int)):
rhs = lit(rhs)
return _apply_column_function('rf_local_divide', left_tile_col, rhs) | 76740879461e6dea302568d3bebc4dd6d7eb9363 | 14,899 |