content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---|
def clique_ring(n_cluster=3, n_in_cluster=5):
"""Get adjacency matrix for cluster domain used by Schapiro et al 2013.
Args:
n_cluster: number of clusters, connected in a ring.
n_in_cluster: number of nodes in each cluster. Each node is connected to all
other nodes in cluster, except the edge connecting the two nodes with
outgoing edges is severed.
Returns:
adjmat: adjacency matrix
xy: xy coordinates of each state for plotting. Obtained by arranging nodes
within a cluster into evenly spaced circles, and then arranging those
clusters evenly around a circle.
labels: (n_state) array containing cluster index of each state
"""
n_state = n_cluster * n_in_cluster
clq, _, _ = clique(n_in_cluster)
clq[0, n_in_cluster-1] = 0
clq[n_in_cluster-1, 0] = 0
adj = clq
for i in range(n_cluster-1):
adj = block_diag(adj, clq)
for i in range(n_cluster):
i_curr = np.mod(i * n_in_cluster-1, n_state)
i_next = np.mod(i * n_in_cluster, n_state)
adj[i_curr, i_next] = 1
adj[i_next, i_curr] = 1
# get xy
clu_ind = np.repeat(np.arange(0, n_cluster).reshape(-1, 1),
n_in_cluster, axis=0).reshape(-1)
ang_clu = clu_ind * 1.0 / n_cluster * 2 * np.pi
x_clu = np.cos(ang_clu).reshape(-1, 1) * 2
y_clu = np.sin(ang_clu).reshape(-1, 1) * 2
offset = np.pi - ang_clu - np.pi/n_in_cluster # turn clusters toward center
ang_in_clu = np.linspace(0, 2*np.pi, n_in_cluster+1)[:n_in_cluster]
ang_in_clus = np.stack([ang_in_clu]*n_cluster).reshape(-1)
ang_in_clus = ang_in_clus - offset
x_in_clu = np.cos(ang_in_clus).reshape(-1, 1)
y_in_clu = np.sin(ang_in_clus).reshape(-1, 1)
# get cluster labels
labels = np.concatenate([np.ones(n_in_cluster) * i for i in range(n_cluster)])
return adj, np.concatenate([x_clu+x_in_clu, y_clu+y_in_clu], axis=1), labels | 2b8dad4b52e456a933c66af7198b6363eb839c41 | 10,033 |
def get_halfnormal_mean_from_scale(scale: float) -> float:
"""Returns the mean of the half-normal distribition."""
# https://en.wikipedia.org/wiki/Half-normal_distribution
return scale * np.sqrt(2) / np.sqrt(np.pi) | d5d0ac1e460d30ad544982a5f0bb7f463c64ede9 | 10,034 |
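A quick numerical check of the expression above — this usage sketch and the scipy comparison are an added illustration, not part of the original entry; scipy is an assumed dependency:
import numpy as np
from scipy.stats import halfnorm

scale = 2.5
analytic = scale * np.sqrt(2) / np.sqrt(np.pi)              # same expression as the function
assert np.isclose(analytic, halfnorm(scale=scale).mean())   # matches scipy's half-normal mean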
def cal_pr(y_hat, y_score):
"""
calculate the precision and recall curve
:param y_hat: ground-truth label, [n_sample]
:param y_score: predicted similarity score, [n_sample]
:return: precision, recall and thresholds arrays (entries with zero true positives are filtered out)
"""
thresholds = np.arange(1, -0.001, -0.001)
fps, tps = cal_binary_cls_curve(y_hat, y_score, thresholds)
pos_idx = tps > 0
tps = tps[pos_idx]
fps = fps[pos_idx]
thresholds = thresholds[pos_idx]
precision = tps / (tps + fps)
recall = tps / np.sum(y_hat)
return precision, recall, thresholds | a64e38a51b5e8c8bdb6bbc26f4c99ae3746dfc64 | 10,035 |
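For orientation, the same precision/recall computation can be done with scikit-learn — an assumed dependency used here only for comparison; unlike the function above, sklearn derives the thresholds from the scores instead of a fixed grid:
import numpy as np
from sklearn.metrics import precision_recall_curve

y_true = np.array([1, 0, 1, 1, 0])               # ground-truth labels
y_score = np.array([0.9, 0.4, 0.65, 0.35, 0.8])  # predicted similarity scores
precision, recall, thresholds = precision_recall_curve(y_true, y_score)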
def alert_source_create(context, values):
"""Create an alert source."""
return IMPL.alert_source_create(context, values) | 7d55eed069b644c718ffb55f27d22a56c7483f73 | 10,036 |
def sanitise_utf8(s):
"""Ensure an 8-bit string is utf-8.
s -- 8-bit string (or None)
Returns the sanitised string. If the string was already valid utf-8, returns
the same object.
This replaces bad characters with ascii question marks (I don't want to use
a unicode replacement character, because if this function is doing anything
then it's likely that there's a non-unicode setup involved somewhere, so it
probably wouldn't be helpful).
"""
if s is None:
return None
try:
s.decode("utf-8")
except UnicodeDecodeError:
return (s.decode("utf-8", 'replace')
.replace(u"\ufffd", u"?")
.encode("utf-8"))
else:
return s | 11b864ade1c36e2b42ffbdd76ee2851f01ca7803 | 10,037 |
def trans_r2xy(r, phi, r_e, phi_e):
"""r,phi -> x,y """
x = np.array(r) * np.cos(phi)
y = np.array(r) * np.sin(phi)
err = np.array(
[polar_err(i, j, k, l) for i, j, k, l in zip(r, phi, r_e, phi_e)]
)
return x, y, err[:, 0], err[:, 1] | dcc9e1433bb40dd76d41b1031420600cdab96d67 | 10,038 |
def ldpc_bp_decode(llr_vec, ldpc_code_params, decoder_algorithm, n_iters):
"""
LDPC Decoder using Belief Propagation (BP).
Parameters
----------
llr_vec : 1D array of float
Received codeword LLR values from the channel.
ldpc_code_params : dictionary
Parameters of the LDPC code.
decoder_algorithm: string
Specify the decoder algorithm type.
SPA for Sum-Product Algorithm
MSA for Min-Sum Algorithm
n_iters : int
Max. number of iterations of decoding to be done.
Returns
-------
dec_word : 1D array of 0's and 1's
The codeword after decoding.
out_llrs : 1D array of float
LLR values corresponding to the decoded output.
"""
n_cnodes = ldpc_code_params['n_cnodes']
n_vnodes = ldpc_code_params['n_vnodes']
max_cnode_deg = ldpc_code_params['max_cnode_deg']
max_vnode_deg = ldpc_code_params['max_vnode_deg']
cnode_adj_list = ldpc_code_params['cnode_adj_list']
cnode_vnode_map = ldpc_code_params['cnode_vnode_map']
vnode_adj_list = ldpc_code_params['vnode_adj_list']
vnode_cnode_map = ldpc_code_params['vnode_cnode_map']
cnode_deg_list = ldpc_code_params['cnode_deg_list']
vnode_deg_list = ldpc_code_params['vnode_deg_list']
dec_word = np.zeros(n_vnodes, int)
out_llrs = np.zeros(n_vnodes)  # float array, since these hold LLR values
cnode_msgs = np.zeros(n_cnodes*max_cnode_deg)
vnode_msgs = np.zeros(n_vnodes*max_vnode_deg)
_limit_llr_v = np.vectorize(_limit_llr)
if decoder_algorithm == 'SPA':
check_node_update = sum_product_update
elif decoder_algorithm == 'MSA':
check_node_update = min_sum_update
else:
raise NameError('Please input a valid decoder_algorithm string.')
# Initialize vnode messages with the LLR values received
for vnode_idx in range(n_vnodes):
start_idx = vnode_idx*max_vnode_deg
offset = vnode_deg_list[vnode_idx]
vnode_msgs[start_idx : start_idx+offset] = llr_vec[vnode_idx]
# Main loop of Belief Propagation (BP) decoding iterations
for iter_cnt in range(n_iters):
continue_flag = 0
# Check Node Update
for cnode_idx in range(n_cnodes):
check_node_update(cnode_idx, cnode_adj_list, cnode_deg_list, cnode_msgs,
vnode_msgs, cnode_vnode_map, max_cnode_deg, max_vnode_deg)
# Variable Node Update
for vnode_idx in range(n_vnodes):
# Compute sum of all incoming messages at the variable node
start_idx = vnode_idx*max_vnode_deg
offset = vnode_deg_list[vnode_idx]
cnode_list = vnode_adj_list[start_idx:start_idx+offset]
cnode_list_msgs = cnode_msgs[cnode_list*max_cnode_deg + vnode_cnode_map[start_idx:start_idx+offset]]
msg_sum = np.sum(cnode_list_msgs)
# Compute messages on outgoing edges using the incoming message sum
vnode_msgs[start_idx:start_idx+offset] = _limit_llr_v(llr_vec[vnode_idx] + msg_sum -
cnode_list_msgs)
# Update output LLRs and decoded word
out_llrs[vnode_idx] = llr_vec[vnode_idx] + msg_sum
if out_llrs[vnode_idx] > 0:
dec_word[vnode_idx] = 0
else:
dec_word[vnode_idx] = 1
# Compute if early termination using parity check matrix
for cnode_idx in range(n_cnodes):
p_sum = 0
for i in range(cnode_deg_list[cnode_idx]):
p_sum ^= dec_word[cnode_adj_list[cnode_idx*max_cnode_deg + i]]
if p_sum != 0:
continue_flag = 1
break
# Stop iterations
if continue_flag == 0:
break
return dec_word, out_llrs | c9bd44c386ead2f9b968eb3a8c211d7af5e26a25 | 10,039 |
def edit_style_formats(style_format_id, **kwargs):
"""Create or edit styles formats.
:param style_format_id: identifier of a specific style format
"""
if request.method == "POST":
args = request.get_json()
errors = StyleFormatsSchema().validate(args)
if errors:
return abort(400, str(errors))
style_format = data.create_style_format(**args)
return jsonify(StyleFormatsSchema().dump(style_format)), 201
if request.method == "DELETE":
data.delete_style_format(style_format_id)
return {'message': 'deleted'}, 204
if request.method == "PUT":
args = request.get_json()
errors = StyleFormatsMetadataSchema().validate(args)
if errors:
return abort(400, str(errors))
style_format = data.update_style_format(style_format_id, **args)
return jsonify(StyleFormatsSchema().dump(style_format)), 200 | a9c0cc004fb840ffcf2b9c0b45f43dc3535ea103 | 10,040 |
def volume_to_vtk(volelement, origin=(0.0, 0.0, 0.0)):
"""Convert the volume element to a VTK data object.
Args:
volelement (:class:`omf.volume.VolumeElement`): The volume element to
convert
origin (tuple): (x, y, z) origin passed through to the grid geometry conversion
"""
output = volume_grid_geom_to_vtk(volelement.geometry, origin=origin)
shp = get_volume_shape(volelement.geometry)
# Add data to output
for data in volelement.data:
arr = data.array.array
arr = np.reshape(arr, shp).flatten(order='F')
output[data.name] = arr
return output | 710152ebdb56592a1485fa0c451bf135679cc949 | 10,042 |
def _in_delta(value, target_value, delta) -> bool:
"""
Check if value is equal to target value within delta
"""
return abs(value - target_value) < delta | 92ab62a381fc1cfc6bbb82635f196ec4498babf4 | 10,043 |
def getpar(key, file='DATA/Par_file', sep='=', cast=str):
""" Reads parameter from SPECFEM parfile
"""
val = None
with open(file, 'r') as f:
# read line by line
for line in f:
if find(line, key) == 0:
# read key
key, val = _split(line, sep)
if not key:
continue
# read val
val, _ = _split(val, '#')
val = val.strip()
break
if val:
if cast == float:
val = val.replace('d', 'e')
return cast(val)
else:
print('Not found in parameter file: %s\n' % key)
raise Exception | 05a2cf904dd1c5cdb71dd302e2a74c3397a6d1e2 | 10,044 |
from typing import Dict
def create_ok_response() -> flask.Response:
"""Creates a 200 OK response.
:return: flask.Response.
"""
ok_body: Dict[str, str] = {"status": "OK"}
return make_response(jsonify(ok_body), HTTP_200_OK) | 4b60c712a1b123c8daa976239cf5abd813e50221 | 10,046 |
import time
def format_timestamp(timestamp):
"""Formats an UTC timestamp into a date string.
>>> format_timestamp("2014-04-08T12:41:34+0100")
'Tue, 08 Apr 2014 12:41:34'
"""
t = iso8601.parse_date(timestamp).timetuple()
return time.strftime("%a, %d %b %Y %H:%M:%S", t) | f551c5bb984ad9d23d0c1d21103f340e6e4b104b | 10,049 |
def _flat(xvals):
"""
Function for flat surface y=0, with boundary conditions
Parameters
----------
xvals : np.array
x-values of the surface.
Returns
-------
yvals : np.array
y-Values of the initialized surface.
"""
yvals = np.zeros_like(xvals)
return yvals | 632ad5fa9acc30e7fae07942890dd9060ab6c859 | 10,050 |
import torch
def regularized_laplacian(weights, labels, alpha):
"""Uses the laplacian graph to smooth the labels matrix by "propagating" labels
Args:
weights: Tensor of shape (batch, n, n)
labels: Tensor of shape (batch, n, n_classes)
alpha: Scalar, acts as a smoothing factor
Returns:
Tuple of (Tensor of shape (batch, n, n_classes) with the propagated labels,
Tensor of shape (batch, n, n) with the propagator matrix)
"""
n = weights.shape[1]
diag = torch.diag_embed(torch.sum(weights, dim=2))
laplacian = diag - weights
identity = torch.eye(n, dtype=laplacian.dtype, device=laplacian.device)[None, :, :]
propagator = torch.inverse(identity + alpha * laplacian)
return _propagate(labels, propagator), propagator | 12725881a121a3fb3455c0905d8db1a90b08dc4d | 10,051 |
def weight_variable_glorot(input_dim, output_dim, name=""):
"""Create a weight variable with Glorot & Bengio (AISTATS 2010)
initialization.
"""
init_range = np.sqrt(6.0 / (input_dim + output_dim))
initial = tf.random_uniform([input_dim, output_dim], minval=-init_range,
maxval=init_range, dtype=tf.float32)
return tf.Variable(initial, name=name) | 85b7ba1f46d0e154425cc884202021faf621bc0e | 10,052 |
from typing import Tuple
def pareto_plot(column: pd.Series,
use_given_index: bool = False,
figsize: Tuple[int, int] = (12, 8),
return_freq_df: bool = False):
"""
Draw Pareto plot for categorical variable
Arguments:
----------
column: pd.Series
Categorical input
use_given_index: bool
If True, treat the series as already aggregated (index = labels, values = frequencies);
otherwise frequencies are computed with value_counts()
figsize: Tuple
size of the figure
return_freq_df: bool
Returns frequency dataframe if True
Example:
--------
>>> pareto_plot(df['state'], figsize=(20, 10))
>>> df = pareto_plot(df['area code'], return_freq_df=True)
>>> df
label frequency cumpercentage
0 415 1655 49.654965
1 510 840 74.857486
2 408 838 100.000000
"""
freq = column.copy()
if not use_given_index:
# raw categorical data: aggregate into label frequencies first
freq = column.value_counts().sort_values(ascending=False)
freq_df = pd.DataFrame({'label': freq.index,
'frequency': freq.values})
freq_df['cumpercentage'] = freq_df['frequency'].cumsum()/freq_df['frequency'].sum()*100
# plot
fig, ax = plt.subplots(figsize=figsize)
ax.bar(freq_df.index, freq_df['frequency'],
color='C0')
ax2 = ax.twinx()
ax2.plot(freq_df.index, freq_df['cumpercentage'],
color='C1', marker='D', ms=7)
ax2.yaxis.set_major_formatter(PercentFormatter())
ax.set_xticks(freq_df.index)
ax.set_xticklabels(freq_df['label'], fontsize=10,
rotation=35)
ax.tick_params(axis='y', colors='C0')
ax2.tick_params(axis='y', colors='C1')
plt.show()
if return_freq_df:
return freq_df | 8bf2f098a93076356ae00e702a05e4b831811609 | 10,053 |
from typing import Optional
from typing import Dict
import json
def remove_external_id(
role_name: str,
dir_path: Optional[str],
session=None,
client=None,
backup_policy: Optional[str] = "",
bucket: Optional[str] = None,
) -> Dict:
"""The remove_external_id method takes a role_name as a string
to allow the removal of an externalId condition.
"""
if session:
iam_client = session.client("iam")
elif client:
iam_client = client
else:
iam_client = boto3.client("iam")
role = iam_client.get_role(RoleName=role_name)
arpd = role["Role"]["AssumeRolePolicyDocument"]
if backup_policy:
if backup_policy.lower() == "local":
if dir_path:
retain_policy(
policy=arpd,
role_name=role_name,
location_type="local",
dir_path=dir_path,
)
else:
retain_policy(policy=arpd, role_name=role_name, location_type="local")
elif backup_policy.lower() == "s3":
retain_policy(
policy=arpd, role_name=role_name, location_type="s3", bucket=bucket
)
arpd["Statement"][0]["Condition"] = {}
try:
iam_client.update_assume_role_policy(
RoleName=role_name, PolicyDocument=json.dumps(arpd)
)
return arpd
except ClientError as error:
raise error | 711fbe0bf12206688b3d372d97fe0e10f1aa59e1 | 10,056 |
def find_binaries(fw_path):
"""
Gets a list of possible binaries within a firmare sample.
The list might contain false positives, angr will ignore them.
:param fw_path: firmware path
:return: a list of binaries
"""
cmd = "find \""+ fw_path + "\""
cmd += " -executable -type f -exec file {} \; | grep -iv image | grep -iv text | awk -F':' '{print $1}'"
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
o, e = p.communicate()
if o:
return o.split('\n')
return [] | 53d4a8f8a9abcc9404392a1ba317fde2e583bc93 | 10,057 |
def get_core_count():
"""
Find out how many CPU cores this system has.
"""
try:
cores = str(compat.enum_cpus()) # 3.4 and up
except NotImplementedError:
cores = "1" # 3.2-3.3
else:
if compat.enum_cpus() is None:
cores = "1"
return cores | 2bd49d6189ba4f6ee92ae3e54cb629a8fb70e440 | 10,058 |
def vec2str(vec):
""" transform the vector to captcha str"""
_str = ""
for i in range(4):
v = vec[i*43: (i+1)*43]
_str += chr(np.argwhere(v == 1)[0][0] + ord('0'))
return _str | 9f927b9b084b2aeff26686a0066bdbfb9ad4e3f3 | 10,060 |
def mnist(path=None, batchsize=20, xpreptrain=None, ypreptrain=None, dataset="train", **kwargs):
"""
Legacy MNIST loader.
:type path: str
:param path: Path to MNIST pickle file.
:type batchsize: int
:param batchsize: Batch size (no shit sherlock)
:type xpreptrain: prepkit.preptrain
:param xpreptrain: Train of preprocessing functions on X. See preptrain's documentation in prepkit.
:type ypreptrain: prepkit.preptrain
:param ypreptrain: Train of preprocessing functions on Y. Can be set to -1 to channel X,Y through xpreptrain.
:type dataset: str
:param dataset: Which dataset to use ('train', 'test' or 'validate')
:rtype: tincan
"""
# Compatibility patch
if "preptrain" in kwargs.keys():
xpreptrain = kwargs["preptrain"]
# Parse path
if path is None:
path = "/Users/nasimrahaman/Documents/Python/DeepBrain/Datasets/mnist.pkl"
# Unpickle data
data = pkl.load(open(path))
# Load the correct X and Y data
assert dataset in ["train", "test", "validate"], "Dataset can be either of the three strings: " \
"'train', 'test', 'validate'. "
datindex = 0 if dataset == "train" else 1 if dataset == "test" else 2
X, Y = data[datindex]
# Generate MNIST tincan
return tincan(data=(X, Y), numclasses=10, batchsize=batchsize, xpreptrain=xpreptrain, ypreptrain=ypreptrain,
xhowtransform=['b', 1, 's', 's'], yhowtransform=['b', 'nc', 1, 1]) | 65ab0c0ad529f5b9b6803585d00fb044a82db2a5 | 10,061 |
def rand_pad(ctvol):
"""Introduce random padding between 0 and 15 pixels on each of the 6 sides
of the <ctvol>"""
randpad = np.random.randint(low=0,high=15,size=(6))
ctvol = np.pad(ctvol, pad_width = ((randpad[0],randpad[1]), (randpad[2],randpad[3]), (randpad[4], randpad[5])),
mode = 'constant', constant_values = np.amin(ctvol))
return ctvol | 83dd1de5c9914127c1d7fcc8d5e5068aa9f2864c | 10,062 |
from typing import Tuple
def _improve(tour: np.ndarray, matrix: np.ndarray, neighbours: np.ndarray, dlb: np.ndarray,
it1: int, t1: int, solutions: set, k: int) -> Tuple[float, np.ndarray]:
""" Последовательный 2-opt для эвристики Лина-Кернига
tour: список городов
matrix: матрица весов
neighbours: набор кандидатов
dlb: don't look bits
it1, t1: индекс, значение города, с которого начинать
solutions: полученные ранее туры
set_x, set_y: наборы удаленных, добавленных ребер
k: k-opt, k - кол-во сколько можно сделать последовательных улучшений
return: выигрыш, новый тур
"""
around_t1 = around(tour, it1)
for it2, t2 in around_t1:
set_x = {make_pair(t1, t2)}
for t3 in neighbours[t2]:
gain = matrix[t1][t2] - matrix[t2][t3]
if t3 == around_t1[0][1] or t3 == around_t1[1][1] or not gain > 1.e-10:
continue
set_y = {make_pair(t2, t3)}
it3 = np.where(tour == t3)[0][0]
_gain, _tour = __choose_t4(tour, matrix, it1, it2, it3, neighbours, gain, set_x, set_y, dlb, solutions, k)
if _gain > 1.e-10:
return _gain, _tour
return 0., tour | 982a575fcde8e78186259f1970dc18850fd3b93e | 10,063 |
def plot_step_with_errorbar(lefts, widths, y_coords, y_errs,
errors_enabled=True, use_errorrects_for_legend=False, **kwargs):
"""Makes a step plot with error bars."""
lefts.append(lefts[-1] + widths[-1])
y_coords.append(y_coords[-1])
# prevent that we have labels for the step and the errorbar,
# otherwise we have two legend entries per data set
step_kwargs = dict(kwargs)
rect_kwargs = dict(kwargs)
if errors_enabled and "label" in kwargs:
if use_errorrects_for_legend:
del step_kwargs["label"]
else:
del rect_kwargs["label"]
# delete kw args that are not defined for plt.step
try:
del step_kwargs["hatch"]
except KeyError:
pass
step_result = plt.step(lefts, y_coords, where='post', **step_kwargs)
if errors_enabled:
try:
ecolor = rect_kwargs["color"]
del rect_kwargs["color"]
except KeyError:
ecolor = plt.gca().lines[-1].get_color() # do not use the next color from the color cycle
try:
del rect_kwargs["marker"]
except KeyError:
pass
try:
del rect_kwargs["zorder"]
except KeyError:
pass
zorder = plt.gca().lines[-1].get_zorder() - 1 # make sure it's drawn below
errorrects_result = plot_errorrects(lefts, y_coords, y_errs, ecolor, zorder, **rect_kwargs)
# x_mids = [left + width / 2.0 for left, width in zip(lefts[:-1], widths)]
# plt.errorbar(x_mids, y_coords[:-1], fmt='none', yerr=y_errs, ecolor=ecolor)
else:
errorrects_result = None
return step_result, errorrects_result | e532e71ada503474e5d52b24a1bf2a7fb2418e82 | 10,064 |
def intensity_modification(x):
""" Intensity modification
Parameters
x: Tensor
Returns
x: Tensor
"""
x = x + tf.random.uniform(shape=[], minval=-0.05, maxval=0.05, dtype=tf.dtypes.float32)
return x | c2ad13b6b123b3f053b88373ecfe7f4adfec87a3 | 10,065 |
def FormIdProperty(expression, **kwargs):
"""
Create a StringProperty that references a form ID. This is necessary because
form IDs change when apps are copied so we need to make sure we update
any references to them.
:param expression: jsonpath expression that can be used to find the field
:param kwargs: arguments to be passed to the underlying StringProperty
"""
path_expression = parse(expression)
assert isinstance(path_expression, jsonpath.Child), "only child path expressions are supported"
field = path_expression.right
assert len(field.fields) == 1, 'path expression can only reference a single field'
form_id_references.append(path_expression)
return StringProperty(**kwargs) | 5ac621dbd69df060de5280e8d893149ecb715b6f | 10,066 |
import secrets
def do_roll(dice: int, sides: int, _: int):
"""Given an amount of dice and the number of sides per die, simulate a dice roll and return
a list of ints representing the outcome values.
Modifier is ignored.
"""
dice = dice or 1
sides = sides or 1
values = sorted(((secrets.randbelow(sides) + 1) for _ in range(0, dice)), reverse=True)
return values | 2073a37e5b76a85182e8cf786707ed18ca3f2474 | 10,067 |
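An illustrative call of the function above (output varies between runs because secrets drives the randomness; the values shown are just one possible outcome):
values = do_roll(3, 6, 0)   # roll three six-sided dice; the modifier argument is ignored
print(values)               # e.g. [6, 4, 2] -- sorted in descending order
print(sum(values))          # total of the roll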
def compute_logp_independent_block(X, alpha=None):
"""Compute the analytical log likelihood of a matrix under the
assumption of independence.
"""
if alpha is None: alpha = np.ones(X.shape[1])
logp_ib = gammaln(alpha.sum()) - (gammaln(alpha)).sum()
logp_ib += gammaln(X.sum(0) + alpha).sum() - gammaln(X.sum() + alpha.sum())
logp_ib += gammaln(X.sum(1) + 1).sum() - gammaln(X + 1).sum()
return logp_ib | 831cdc63f8e131d3dfb797e054dfcd421f939ed5 | 10,068 |
def check_validity_label(labels):
"""
Check to see whether it makes a valid tuple
Parameters:
-----------
labels: A tuple of labels (Object_1, Object_2, Object_3,
Return:
-------
bool: True if the label tuple is a valid combination, False otherwise
"""
# Event is None -> All other values are None
if labels[3] == 0:
for i in xrange(5):
if labels[i] != 0:
return False
return True
# If two roles have the same object return False
for i in xrange(3):
for j in xrange(3):
if i != j and labels[i] == labels[j] and labels[i] != 0:
return False
# If there is a Theme, there needs to be a Preposition and vice versa
if labels[2] != 0 and labels[4] == 0:
return False
if labels[2] == 0 and labels[4] != 0:
return False
return True | c5a3d75813ab521b1e56789d64e7f14861075fb0 | 10,069 |
def flat_proj(v1, v2):
""" Returns the flat projection of direction unit vector, v1 onto v2 """
temp1 = np.cross(v1, v2)
temp2 = np.cross(temp1, v1)
return proj(temp2, v2) | 8a75dc118940cad6735f361ae3214358d78881e9 | 10,070 |
import torch
from typing import Optional
from typing import Tuple
from typing import List
def marching_cubes_naive(
volume_data_batch: torch.Tensor,
isolevel: Optional[float] = None,
spacing: int = 1,
return_local_coords: bool = True,
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
"""
Runs the classic marching cubes algorithm, iterating over
the coordinates of the volume_data and using a given isolevel
for determining intersected edges of cubes of size `spacing`.
Returns vertices and faces of the obtained mesh.
This operation is non-differentiable.
This is a naive implementation, and is not optimized for efficiency.
Args:
volume_data_batch: a Tensor of size (N, D, H, W) corresponding to
a batch of 3D scalar fields
isolevel: the isosurface value to use as the threshold to determine
whether points are within a volume. If None, then the average of the
maximum and minimum value of the scalar field will be used.
spacing: an integer specifying the cube size to use
return_local_coords: bool. If True the output vertices will be in local coordinates in
the range [-1, 1] x [-1, 1] x [-1, 1]. If False they will be in the range
[0, W-1] x [0, H-1] x [0, D-1]
Returns:
verts: [(V_0, 3), (V_1, 3), ...] List of N FloatTensors of vertices.
faces: [(F_0, 3), (F_1, 3), ...] List of N LongTensors of faces.
"""
volume_data_batch = volume_data_batch.detach().cpu()
batched_verts, batched_faces = [], []
D, H, W = volume_data_batch.shape[1:]
volume_size_xyz = volume_data_batch.new_tensor([W, H, D])[None]
if return_local_coords:
# Convert from local coordinates in the range [-1, 1] range to
# world coordinates in the range [0, D-1], [0, H-1], [0, W-1]
local_to_world_transform = Translate(
x=+1.0, y=+1.0, z=+1.0, device=volume_data_batch.device
).scale((volume_size_xyz - 1) * spacing * 0.5)
# Perform the inverse to go from world to local
world_to_local_transform = local_to_world_transform.inverse()
for i in range(len(volume_data_batch)):
volume_data = volume_data_batch[i]
curr_isolevel = (
((volume_data.max() + volume_data.min()) / 2).item()
if isolevel is None
else isolevel
)
edge_vertices_to_index = {}
vertex_coords_to_index = {}
verts, faces = [], []
# Use length - spacing for the bounds since we are using
# cubes of size spacing, with the lowest x,y,z values
# (bottom front left)
for x in range(0, W - spacing, spacing):
for y in range(0, H - spacing, spacing):
for z in range(0, D - spacing, spacing):
cube = Cube((x, y, z), spacing)
new_verts, new_faces = polygonise(
cube,
curr_isolevel,
volume_data,
edge_vertices_to_index,
vertex_coords_to_index,
)
verts.extend(new_verts)
faces.extend(new_faces)
if len(faces) > 0 and len(verts) > 0:
verts = torch.tensor(verts, dtype=torch.float32)
# Convert vertices from world to local coords
if return_local_coords:
verts = world_to_local_transform.transform_points(verts[None, ...])
verts = verts.squeeze()
batched_verts.append(verts)
batched_faces.append(torch.tensor(faces, dtype=torch.int64))
return batched_verts, batched_faces | a7a4ac4a08bbc270091acc2ddd6a84eb4ee0ba37 | 10,071 |
def gc_resnet101(num_classes):
"""Constructs a ResNet-101 model.
Args:
num_classes (int): number of output classes
"""
model = ResNet(GCBottleneck, [3, 4, 23, 3], num_classes=num_classes)
model.avgpool = nn.AdaptiveAvgPool2d(1)
return model | e6bb2e5e97fcf81d6abba34a2ee6c0638d39edf8 | 10,073 |
def compute_seatable_votes(votes, votetypes):
"""Compute the seatable votes.
Parameters
----------
votes: pandas.DataFrame
the table of vote counts.
votetypes: dict
the information of the different types of vote variables.
Returns
-------
seatable_votes: numpy.ndarray
the variables which have votes which can be transformed into seats.
"""
votetypes = prepare_votetypes(votes, votetypes)
seatable_votes = votes[votetypes['seatable']]
return seatable_votes | 1f8a32589918236e00d1c702cb23ecbc19d0cccb | 10,075 |
from typing import Optional
async def read_cookie(refresh_token: Optional[str] = Cookie(None)) -> JSONResponse:
"""Reads a cookie.
Args:
refresh_token: Name of the cookie.
Returns:
JSONResponse:
Returns the value of the cookie as a json blurb.
"""
if refresh_token:
return JSONResponse(
content={
"refresh_token": refresh_token
},
status_code=200,
headers=RESET_HEADERS
)
else:
return JSONResponse(
content={
"refresh_token": status.HTTP_404_NOT_FOUND
},
status_code=404,
headers=RESET_HEADERS
) | f7e4e20f138b24a6d1beda76b2c2e565f28e513c | 10,076 |
def readAirfoilFile(fileName, bluntTe=False, bluntTaperRange=0.1, bluntThickness=0.002):
"""Load the airfoil file"""
f = open(fileName)
line = f.readline() # Read (and ignore) the first line
r = []
try:
r.append([float(s) for s in line.split()])
except Exception:
pass
while 1:
line = f.readline()
if not line:
break # end of file
if line.isspace():
break # blank line
r.append([float(s) for s in line.split()])
rr = np.array(r)
x = rr[:, 0]
y = rr[:, 1]
npt = len(x)
xMin = min(x)
# There are 4 possibilities we have to deal with:
# a. Given a sharp TE -- User wants a sharp TE
# b. Given a sharp TE -- User wants a blunt TE
# c. Given a blunt TE -- User wants a sharp TE
# d. Given a blunt TE -- User wants a blunt TE
# (possibly with different TE thickness)
# Check for blunt TE:
if bluntTe is False:
if y[0] != y[-1]:
print("Blunt Trailing Edge on airfoil: %s" % (fileName))
print("Merging to a point over final %f ..." % (bluntTaperRange))
yAvg = 0.5 * (y[0] + y[-1])
xAvg = 0.5 * (x[0] + x[-1])
yTop = y[0]
yBot = y[-1]
xTop = x[0]
xBot = x[-1]
# Indices on the TOP surface of the wing
indices = np.where(x[0 : npt // 2] >= (1 - bluntTaperRange))[0]
for i in range(len(indices)):
fact = (x[indices[i]] - (x[0] - bluntTaperRange)) / bluntTaperRange
y[indices[i]] = y[indices[i]] - fact * (yTop - yAvg)
x[indices[i]] = x[indices[i]] - fact * (xTop - xAvg)
# Indices on the BOTTOM surface of the wing
indices = np.where(x[npt // 2 :] >= (1 - bluntTaperRange))[0]
indices = indices + npt // 2
for i in range(len(indices)):
fact = (x[indices[i]] - (x[-1] - bluntTaperRange)) / bluntTaperRange
y[indices[i]] = y[indices[i]] - fact * (yBot - yAvg)
x[indices[i]] = x[indices[i]] - fact * (xBot - xAvg)
elif bluntTe is True:
# Since we will be rescaling the TE regardless, the sharp TE
# case and the case where the TE is already blunt can be
# handled in the same manner
# Get the current thickness
curThick = y[0] - y[-1]
# Set the new TE values:
xBreak = 1.0 - bluntTaperRange
# Rescale upper surface:
for i in range(0, npt // 2):
if x[i] > xBreak:
s = (x[i] - xMin - xBreak) / bluntTaperRange
y[i] += s * 0.5 * (bluntThickness - curThick)
# Rescale lower surface:
for i in range(npt // 2, npt):
if x[i] > xBreak:
s = (x[i] - xMin - xBreak) / bluntTaperRange
y[i] -= s * 0.5 * (bluntThickness - curThick)
return x, y | 3b3da70ff36dc3a4ab2a186ee9712978f2658294 | 10,077 |
import re
def depListToArtifactList(depList):
"""Convert the maven GAV to a URL relative path"""
regexComment = re.compile('#.*$')
#regexLog = re.compile('^\[\w*\]')
artifactList = []
for nextLine in depList:
nextLine = regexComment.sub('', nextLine)
nextLine = nextLine.strip()
gav = maven_repo_util.parseGATCVS(nextLine)
if gav:
artifactList.append(MavenArtifact.createFromGAV(gav))
return artifactList | 52d27c3310a4fd17df857df4725079a4d93faa76 | 10,079 |
def configure_plugins_plugin_install_to_version(request, pk, version):
"""
View rendering for the install to version modal interface
:param request: Request
:param pk: The primary key for the plugin
:param version: The version to install
:return: a renderer
"""
plugin = get_object_or_404(Plugin, pk=pk)
action = reverse(
"api_dispatch_install_to_version",
kwargs={
"api_name": "v1",
"resource_name": "plugin",
"pk": pk,
"version": version,
},
)
_installVersionedName = Plugin(name=plugin.name, version=version).versionedName()
ctx = RequestContext(
request,
{
"method": "POST",
"action": action,
"i18n": {
"title": ugettext_lazy(
"configure_plugins_plugin_install_to_version.title"
), # 'Confirm Install Plugin'
"confirmmsg": ugettext_lazy(
"configure_plugins_plugin_install_to_version.messages.confirmmsg.singular"
)
% { # 'Are you sure you want to install %(versionedName)s?'
"versionedName": _installVersionedName
},
"submit": ugettext_lazy(
"configure_plugins_plugin_install_to_version.action.submit"
), # 'Yes, Upgrade!'
"cancel": ugettext_lazy("global.action.modal.cancel"),
"submitmsg": ugettext_lazy(
"configure_plugins_plugin_install_to_version.messages.submitmsg"
), # 'Now upgrading, please wait.'
},
},
)
return render_to_response(
"rundb/configure/modal_confirm_plugin_install_to_version.html",
context_instance=ctx,
) | 96e1076bdb84d6e0758d5ba03777a6576889cfdc | 10,080 |
def _parameters_to_vector(parameters):
"""
This fix is required for pytorch >= 1.6.0, due to the change
in memory format promotion rule.
For more info, check:
* https://github.com/pytorch/pytorch/pull/37968
* https://github.com/pytorch/pytorch/releases/tag/v1.6.0
and search "Note: BC-breaking memory format changes"
"""
parameters = [p.contiguous() for p in parameters]
return th.nn.utils.parameters_to_vector(parameters) | f3b7d4cb8262cbbcbe2e5abace6e8e8162fb3a57 | 10,081 |
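A minimal usage sketch for the helper above, assuming PyTorch is installed and imported as `th` (the alias the function body implies); the model is an arbitrary example:
import torch as th

model = th.nn.Linear(4, 2)
vec = _parameters_to_vector(model.parameters())
print(vec.shape)  # torch.Size([10]) -- the 4*2 weights plus 2 biases, flattened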
from utils.snowflake.id_worker import IdWorker
from utils.limiter import limiter as lmt
from utils.logging import create_logger
from utils.converters import register_converters
from redis.sentinel import Sentinel
from rediscluster import StrictRedisCluster
from models import db
from .resources.user import user_bp
from .resources.news import news_bp
from .resources.notice import notice_bp
from .resources.search import search_bp
def create_app(config, enable_config_file=False):
"""
Create the application.
:param config: configuration object
:param enable_config_file: whether config files in the runtime environment may override the already-loaded configuration
:return: the application
"""
app = create_flask_app(config, enable_config_file)
# Create the Snowflake ID worker
app.id_worker = IdWorker(app.config['DATACENTER_ID'],
app.config['WORKER_ID'],
app.config['SEQUENCE'])
# Rate limiter
lmt.init_app(app)
# Configure logging
create_logger(app)
# Register URL converters
register_converters(app)
_sentinel = Sentinel(app.config['REDIS_SENTINELS'])
app.redis_master = _sentinel.master_for(app.config['REDIS_SENTINEL_SERVICE_NAME'])
app.redis_slave = _sentinel.slave_for(app.config['REDIS_SENTINEL_SERVICE_NAME'])
app.redis_cluster = StrictRedisCluster(startup_nodes=app.config['REDIS_CLUSTER'])
# rpc
# app.rpc_reco = grpc.insecure_channel(app.config['RPC'].RECOMMEND)
# Elasticsearch
app.es = Elasticsearch(
app.config['ES'],
# sniff before doing anything
sniff_on_start=True,
# refresh nodes after a node fails to respond
sniff_on_connection_fail=True,
# and also every 60 seconds
sniffer_timeout=60
)
# socket.io
# app.sio = socketio.KombuManager(app.config['RABBITMQ'], write_only=True)
# Initialize the MySQL database connection
db.init_app(app)
# # Add request hooks
# from utils.middlewares import jwt_authentication
# app.before_request(jwt_authentication)
# Register the user module blueprint
app.register_blueprint(user_bp)
# Register the news module blueprint
app.register_blueprint(news_bp)
# Register the notification module
app.register_blueprint(notice_bp)
# Search
app.register_blueprint(search_bp)
return app | 1284a53c24d7fc4bf2ce0a0f00d6a3defea642d6 | 10,082 |
def select_variables(expr):
"""When called on an expression, will yield selectors to the variable.
A selector will either return the variable (or equivalent fragment) in
an expression, or will return an entirely new expression with the
fragment replaced with the value of `swap`.
e.g.
>>> from qiime2.core.type.tests.test_grammar import (MockTemplate,
... MockPredicate)
>>> Example = MockTemplate('Example', fields=('x',))
>>> Foo = MockTemplate('Foo')
>>> Bar = MockPredicate('Bar')
>>> T = TypeMatch([Foo])
>>> U = TypeMatch([Bar])
>>> select_u, select_t = select_variables(Example[T] % U)
>>> t = select_t(Example[T] % U)
>>> assert T is t
>>> u = select_u(Example[T] % U)
>>> assert U is u
>>> frag = select_t(Example[Foo] % Bar)
>>> assert frag is Foo
>>> new_expr = select_t(Example[T] % U, swap=frag)
>>> assert new_expr == Example[Foo] % U
"""
if type(expr) is TypeVarExp:
def select(x, swap=None):
if swap is not None:
return swap
return x
yield select
return
if type(expr) is not TypeExp:
return
if type(expr.full_predicate) is TypeVarExp:
def select(x, swap=None):
if swap is not None:
return x.duplicate(predicate=swap)
return x.full_predicate
yield select
for idx, field in enumerate(expr.fields):
for sel in select_variables(field):
# Without this closure, the idx in select will be the last
# value of the enumerate, same for sel
# (Same problem as JS with callbacks inside a loop)
def closure(idx, sel):
def select(x, swap=None):
if swap is not None:
new_fields = list(x.fields)
new_fields[idx] = sel(x.fields[idx], swap)
return x.duplicate(fields=tuple(new_fields))
return sel(x.fields[idx])
return select
yield closure(idx, sel) | a147b1f1fc66373597b98085b13ffd326baf72e1 | 10,083 |
from typing import Callable
from typing import Optional
import glob
def get_login(name_p: str, pass_p: str, auth_error: bytes = b'') -> Callable:
"""Decorator to ensure a player's login information is correct."""
# NOTE: this function does NOT verify whether the arguments have
# been passed into the connection, and assumes you have already
# called the appropriate decorator above, @required_x.
def wrapper(f: Callable) -> Callable:
# modify the handler code to get the player
# object before calling the handler itself.
@wraps(f)
async def handler(conn: Connection) -> Optional[bytes]:
# args may be provided in regular args
# or multipart, but only one at a time.
argset = conn.args or conn.multipart_args
if not (
p := await glob.players.get_login(
name = unquote(argset[name_p]),
pw_md5 = argset[pass_p]
)
):
# player login incorrect
return auth_error
# login verified, call the handler
return await f(p, conn)
return handler
return wrapper | 3b3a1eb36d92de373eab9414abef6dd44bf14502 | 10,084 |
import math
def map_visualize(df: gpd.GeoDataFrame,
lyrs='s',
scale=0.5,
figsize = (12,9),
color = "red",
ax = None,
fig=None,
*args, **kwargs):
"""Draw the geodataframe with the satellite image as the background
Args:
`df` (gpd.GeoDataFrame): the GeoDataFrame to plot
`ax`: the matplotlib axes to draw on
`lyrs` (str, optional): [ m: road map; t: terrain; p: terrain with labels; s: satellite; y: satellite with labels; h: label layer (road names, place names, etc.)]. Defaults to 's'.
`scale` (float): border percentage
`color`: the color used to draw the geometry
Returns:
fig, ax: the matplotlib figure and axes
"""
# lyrs='y';scale=0.5;figsize = (12,9); color = "red";ax = None;fig=None;
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
df.plot(color = color, ax=ax, zorder=1, *args, **kwargs)
# df.plot(color = color, zorder=1)
[x0, x1], [y0, y1] = plt.xlim(), plt.ylim()
gap_x, gap_y = (x1-x0), (y1-y0)
[a, b, c, d] = df.total_bounds
if a == c:
x0, x1 = a - 0.001, c + 0.001
gap_x = x1- x0
if b == d:
y0, y1 = b - 0.001, d + 0.001
gap_y = y1 - y0
if not 0.4 <= gap_y / gap_x <= 2.5:
mid_x, mid_y = (x1+x0)/2, (y1+y0)/2
gap = max(gap_x, gap_y) * (1 + scale) / 2
[x0, y0, x1, y1] = [mid_x - gap, mid_y - gap, mid_x + gap, mid_y + gap]
else:
[x0, y0, x1, y1] = [x0-(x1-x0) * scale, y0+(y0-y1) * scale,
x1+(x1-x0) * scale, y1-(y0-y1) * scale]
zoom = 15 - int(math.log2(haversine((x0, y1), (x1, y0))/3))
# print([x0, x1], [y0, y1], haversine((x0, y1), (x1, y0))/3)
# warning: if zoom is bigger than 19, something will go wrong
zoom = 19 if zoom > 19 else zoom
img = tile.Tiles()
f_lst, img_bbox = img.get_tiles_by_bbox([x0, y1, x1, y0], zoom, lyrs)
to_image = merge_tiles(f_lst)
background, _ = clip_background( to_image, img_bbox, [x0, y1, x1, y0], False)
ax.imshow(background, extent=[x0, x1, y0, y1], alpha=.6, zorder=0)
plt.xlim(x0, x1)
plt.ylim(y0, y1)
# Disable the offset / scientific notation on the axis labels
ax.get_xaxis().get_major_formatter().set_useOffset(False)
ax.get_yaxis().get_major_formatter().set_useOffset(False)
# set_major_locator
# ax.xaxis.set_major_locator(plt.NullLocator())
# ax.yaxis.set_major_locator(plt.NullLocator())
return fig, ax | f59c72079f789e63ad7910e5c4ee62d93e5015e9 | 10,085 |
def unorm_to_byte(x):
"""float x in [0, 1] to an integer [0, 255]"""
return min(int(256 * x), 255) | a6870a339b9b0d5466962a9129c717876d8d0a50 | 10,086 |
def eigh(a, largest: bool = False):
"""
Get eigenvalues / eigenvectors of hermitian matrix a.
Args:
a: square hermitian float matrix
largest: if True, return order is based on descending eigenvalues, otherwise
ascending.
Returns:
w: [m] eigenvalues
v: [m, m] eigenvectors
"""
return _eigh(a, largest) | 254f243bd5c70f606cc67111df03626a0eae25b0 | 10,087 |
def lowpass(x, dt, fc, order=5):
"""
Low pass filter data signal x at cut off frequency fc, blocking harmonic content above fc.
Parameters
----------
x : array_like
Signal
dt : float
Signal sampling rate (s)
fc : float
Cut off frequency (Hz)
order : int, optional
Butterworth filter order. Default 5.
Returns
-------
array
Filtered signal
See Also
--------
scipy.signal.butter, scipy.signal.filtfilt
"""
nyq = 0.5 * 1. / dt # nyquist frequency
normal_cutoff = fc / nyq # normalized cut off frequency
b, a = butter(order, normal_cutoff, btype='lowpass', analog=False)
y = filtfilt(b, a, x)
return y | fd3cd4f7ccca9c2244c82420a560199633d082ab | 10,088 |
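A usage sketch for the filter above — it assumes numpy is available and that scipy.signal's butter and filtfilt are imported, as the function body requires; the signal and cut-off values are illustrative:
import numpy as np

dt = 0.01                                                              # 100 Hz sampling
t = np.arange(0.0, 10.0, dt)
x = np.sin(2 * np.pi * 0.5 * t) + 0.2 * np.sin(2 * np.pi * 20.0 * t)   # slow signal + fast noise
y = lowpass(x, dt, fc=2.0)                                             # keeps the 0.5 Hz component, blocks 20 Hz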
def doRunFixPlanets(msName):
"""Generate code for running fixplanets on fields with (0,0) coordinates"""
print('\n*** doRunFixPlanets ***')
fieldIds = sfsdr.getFieldsForFixPlanets(msName)
if len(fieldIds) != 0:
casaCmd = ''
mytb = aU.createCasaTool(tbtool)
mytb.open(msName+'/FIELD')
fieldNames = mytb.getcol('NAME')
mytb.close()
fieldNames = ['%s' %fieldNames[i] for i in fieldIds]
fieldNames = ','.join(fieldNames)
fieldIds = ['%s' %i for i in fieldIds]
fieldIds = ','.join(fieldIds)
casaCmd = casaCmd + "fixplanets(vis = '"+msName+"',\n"
casaCmd = casaCmd + " field = '"+fieldIds+"', # "+fieldNames+"\n"
casaCmd = casaCmd + " fixuvw = True)\n"
return casaCmd | 2656505e91eeea545c1c91c169b183ac5dd5413a | 10,089 |
def add_name_suffix(
suffix, obj_names=None, filter_type=None, add_underscore=False, search_hierarchy=False,
selection_only=True, **kwargs):
"""
Add prefix to node name
:param suffix: str, string to add to the end of the current node
:param obj_names: str or list(str), name of list of node names to rename
:param filter_type: str, name of object type to filter the objects to apply changes ('Group, 'Joint', etc)
:param add_underscore: bool, Whether or not to add underscore before the suffix
:param search_hierarchy: bool, Whether to search objects in hierarchies
:param selection_only: bool, Whether to search only selected objects or all scene objects
:param kwargs:
"""
rename_shape = kwargs.get('rename_shape', True)
if filter_type:
return name.add_suffix_by_filter(
suffix=suffix, filter_type=filter_type, add_underscore=add_underscore, rename_shape=rename_shape,
search_hierarchy=search_hierarchy, selection_only=selection_only, dag=False, remove_maya_defaults=True,
transforms_only=True)
else:
return name.add_suffix(
suffix=suffix, obj_names=obj_names, add_underscore=add_underscore, rename_shape=rename_shape) | c9355a5030c430d6efa6d8abc6b6d9128f77cb8e | 10,090 |
def checksum(hdpgroup: list,
algorithm: str = 'CRC32',
chktag: str = '\'α') -> list:
"""List of checksums-like for detection of Non-intentional data corruption
See https://en.wikipedia.org/wiki/Cksum
See https://en.wikipedia.org/wiki/Checksum
Args:
hdpgroup (list): list of HDP-like objects
type (str): The type of checker
htag (str): select only by special tags (for complex documents) mixing
several hashings. See hashable()
Returns:
list: List of strings optimized to be used as input for hashing
>>> import hxlm.core as HXLm
>>> UDUR_LAT = HXLm.util.load_file(HXLm.HDATUM_UDHR + '/udhr.lat.hdp.yml')
>>> checksum(UDUR_LAT)
['(CRC32 \\'\\'α "3839021470")']
>>> UDUR_RUS = HXLm.util.load_file(HXLm.HDATUM_UDHR + '/udhr.rus.hdp.yml')
>>> checksum(UDUR_RUS)
['(CRC32 \\'\\'α "3839021470")']
"""
if algorithm != 'CRC32':
raise NotImplementedError('algorithm [' +
str(algorithm) + '] not implemented')
# Escape ' is not an walk in the park. Just to simplify, we will replace
# double '' with '
if chktag.find("''") > -1:
chktag = chktag.replace("''", "'")
result = []
for hsilo in hdpgroup:
hashable_str = hashable([hsilo])[0]
hashable_code = _get_checksum(hashable_str, chktag=chktag)
result.append(hashable_code)
return result | 66566fbef3c962d5bcdf56727ff66ddfdd8af9b7 | 10,091 |
def dsum(i0,i1,step = 1, box=[]):
""" for a range of fits files
compute the mean and dispersion from the mean
"""
for i in range(i0,i1+1,step):
ff = 'IMG%05d.FIT' % i
h1, d1 = getData(ff,box)
#very specific for 16 bit data, since we want to keep the data in uint16
bzero = h1['BZERO']
bscale = h1['BSCALE']
if i == i0:
sum0 = 1.0
sum1 = d1*bscale+bzero
sum2 = sum1*sum1
#sum1 = d1
#sum2 = d1*d1
h = h1
nx = d1.shape[1]
ny = d1.shape[0]
nz = i1 + 1 - i0
c = np.zeros((nz, ny, nx))
c[0,:,:] = d1.reshape(ny,nx)
else:
sum0 = sum0 + 1.0
sum1 = sum1 + (d1 * bscale + bzero)
sum2 = sum2 + (d1 * bscale + bzero) * (d1 * bscale + bzero)
#sum2 = sum2+d1*d1
c[i - i0,:,:] = d1.reshape(ny,nx)
sum1 = sum1 / sum0
sum2 = sum2 / sum0 - sum1*sum1
print (type(sum1), type(sum2))
return (h,sum1,np.sqrt(sum2),c) | 6e0048461e29a7de4f7c4322fa1e3213f8248e60 | 10,092 |
def _env_translate_obs(obs):
"""
This should only be used for the Tiger ENV.
Parameters
----------
obs : list or array-like
The observation to be translated.
Returns
-------
str
A representation of the observation in English.
"""
if obs[0] == 1:
return 'GROWL_LEFT'
elif obs[1] == 1:
return 'GROWL_RIGHT'
elif obs[2] == 1:
return 'START'
elif obs[3] == 1:
return 'END'
else:
raise ValueError('Invalid observation: {}'.format(obs))
def voter(address):
"""
Returns voter credentials.
Parameters:
address: address
Returns:
list of three values: address (str), is_voter (bool),
voted (bool).
"""
return contract.functions.voters(address).call() | 91fd7adca6f8ed2e02dbe60b6241eb92f34a81b6 | 10,094 |
def E_disp_z(m, N, j_star=3.):
"""Vertical displacement as a function of vertical wavenumber."""
num = E0*b**3*N0**2
den = 2*j_star*np.pi*N**2 * (1 + m/beta_star(N, j_star))**2
return num/den | 39e6b9b5d512d577c8109ecfb5657a0ef5a8ea42 | 10,095 |
def get_stereo_image():
"""Retrieve one stereo camera image
Returns:
(mat): cv2 image
"""
img = core.get_stereo_image()
if img is not None:
return img
else:
return None | 72e570672885e8ef8c14c9cd29d3f7c648f9abac | 10,096 |
import time
import requests
def request_set_arm_state(token: str, arm_state: str):
"""Request set arm state."""
headers = {
'Authorization': 'Bearer %s' % token,
'Content-Type': 'application/json'
}
payload = {
"Created": int(time.time()),
"AppVersion": APP_VERSION,
"AppType": APPTYPE,
"App": APP
}
response = requests.post(
_build_url('Location/{}'.format(arm_state)),
headers=headers,
json=payload)
response.raise_for_status()
return response.json() | 28a8ad2a0d49305d80581c2257d3fb9495b3f680 | 10,097 |
def get_all_config(filename=None):
"""
Build configuration options for configparser, applying defaults.
If no file is passed, a config with default settings is returned;
missing sections and keys in the parsed file are filled in from the defaults.
:param filename: options config file to read
:return: configparser object with default config for missing sections
"""
_config = parse_config2(filename)
default_config = set_defaults()
# Verify each section in default_config
for s in range(len(default_config.sections())):
section = default_config.sections()[s]
# Add the missing section to the config obtained
if not _config.has_section(section):
_config.add_section(section)
# Add missing keys to config obtained
for key in default_config[section]:
if not _config.has_option(section, key):
_config[section][key] = default_config[section][key]
return _config | 9a5bdcd272f49be5bd8374e06f5b6579da91d64a | 10,098 |
def check_for_end_or_abort(e):
"""Return a closure checking for END or ABORT notifications
Arguments:
e -- event to signal when the action is completed
(will be set when an END or ABORT occurs)
"""
def check(notification, e = e):
print("EVENT : " + \
Base_pb2.ActionEvent.Name(notification.action_event))
if notification.action_event == Base_pb2.ACTION_END \
or notification.action_event == Base_pb2.ACTION_ABORT:
e.set()
return check | 91809c705666f4fd3aae7273760d5845fa35eadb | 10,099 |
def check_vacancy_at_cell(house_map, cell):
"""
Return True if the given cell is vacant.
Vacancy is defined as a '0' in the house map at the given coordinates.
(i.e. there is no wall at that location)
"""
x = cell[0]
y = cell[1]
if not 0 <= x < MAP_WIDTH:
return False
if not 0 <= y < MAP_HEIGHT:
return False
return house_map[y][x] == '0' | 78a24b25a6954b6411aa686512066b25c6f4e1d5 | 10,100 |
from typing import Dict
def extract_text_and_vertices(x: Dict[str, str]):
"""Extracts all annotations and bounding box vertices from a single OCR
output from Google Cloud Vision API.
The first element is the full OCR. It's equivalent to the output of
`extract_full_text_annotation` for the same OCR output.
Args:
x (Dict[str, str]): whole OCR output.
Returns:
list where each item is a tuple where the first element is the text and
the second are the 4 vertices of the corresponding bounding box.
"""
blocks = []
for annotation in x["textAnnotations"]:
text = annotation['description']
vertices = [
tuple(x.values()) for x in annotation['boundingPoly']['vertices']
]
blocks.append((text, vertices))
return blocks | 6fde2bc71ceccfd580a6f5f0da7fa0b76e045bad | 10,101 |
def cspace3(obs, bot, theta_steps):
"""
Compute the 3D (x, y, yaw) configuration space obstacle for a list of convex 2D obstacles given by [obs] and a convex 2D robot given by vertices in [bot] at a variety of theta values.
obs should be a 3D array of size (2, vertices_per_obstacle, num_obstacles)
bot should be a 2d array of size (2, num_bot_vertices)
theta_steps can either be a scalar, in which case it specifies the number of theta values, evenly spaced between -pi and +pi; or it can be a vector of theta values.
"""
bot = -np.array(bot)
if np.isscalar(theta_steps):
thetas = np.linspace(-np.pi, np.pi, num=theta_steps)
else:
thetas = theta_steps
c_obs = []
for k in range(obs.shape[2]):
for j in range(len(thetas)-1):
th0 = thetas[j]
th1 = thetas[j+1]
bot_rot0 = rotmat(th0).dot(bot)
c_obs0 = minkowski_sum(bot_rot0, obs[:,:,k])
bot_rot1 = rotmat(th1).dot(bot)
c_obs1 = minkowski_sum(bot_rot1, obs[:,:,k])
c_pts = np.vstack((np.hstack((c_obs0, c_obs1)),
np.hstack((th0 + np.zeros(c_obs0.shape[1]),
th1 + np.zeros(c_obs1.shape[1])))))
c_obs.append(c_pts)
if len(c_obs) == 0:
return np.zeros((3, bot.shape[1] * 2, 0))
max_n_vert = max((x.shape[1] for x in c_obs))
return np.dstack((np.pad(c, pad_width=((0,0), (0,max_n_vert-c.shape[1])), mode='edge') for c in c_obs)) | 723e1b885a19ae0416226856f7b03aecb045e139 | 10,102 |
from typing import Optional
def Graph(backend:Optional[str]=None) -> BaseGraph:
"""Returns an instance of an implementation of :class:`~pyzx.graph.base.BaseGraph`.
By default :class:`~pyzx.graph.graph_s.GraphS` is used.
Currently ``backend`` is allowed to be `simple` (for the default),
or 'graph_tool' and 'igraph'.
This method is the preferred way to instantiate a ZX-diagram in PyZX.
Example:
To construct an empty ZX-diagram, just write::
g = zx.Graph()
"""
if backend is None: backend = 'simple'
if backend not in backends:
raise KeyError("Unavailable backend '{}'".format(backend))
if backend == 'simple': return GraphS()
if backend == 'graph_tool':
return GraphGT()
if backend == 'igraph': return GraphIG()
if backend == 'quizx-vec': return quizx.VecGraph() # type: ignore
return GraphS() | 9d2d759096016e0df770863448305b627df0ce73 | 10,103 |
from datetime import datetime
def get_carb_data(data, offset=0):
""" Load carb information from an issue report cached_carbs dictionary
Arguments:
data -- dictionary containing cached carb information
offset -- the offset from UTC in seconds
Output:
3 lists in (carb_values, carb_start_dates, carb_absorption_times)
format
"""
carb_values = [float(dict_.get("quantity")) for dict_ in data]
start_dates = [
datetime.strptime(
dict_.get("startDate"),
" %Y-%m-%d %H:%M:%S %z"
) + timedelta(seconds=offset)
for dict_ in data
]
absorption_times = [
float(dict_.get("absorptionTime")) / 60
if dict_.get("absorptionTime") is not None
else None for dict_ in data
]
assert len(start_dates) == len(carb_values) == len(absorption_times),\
"expected input shapes to match"
return (start_dates, carb_values, absorption_times) | cc2e54859f3f4635e9724260f277dd3c191c32ac | 10,104 |
def _discover_bounds(cdf, tol=1e-7):
"""
Uses scipy's general continuous distribution methods
which compute the ppf from the cdf, then use the ppf
to find the lower and upper limits of the distribution.
"""
class DistFromCDF(stats.distributions.rv_continuous):
def cdf(self, x):
return cdf(x)
dist = DistFromCDF()
# the ppf is the inverse cdf
lower = dist.ppf(tol)
upper = dist.ppf(1. - tol)
return lower, upper | bb882065ed74a34c61c60aa48481b2737a2496da | 10,105 |
def ml_app_instances_ml_app_instance_id_get(ml_app_instance_id): # noqa: E501
"""ml_app_instances_ml_app_instance_id_get
# noqa: E501
:param ml_app_instance_id: MLApp instance identifier
:type ml_app_instance_id: str
:rtype: None
"""
return 'do some magic!' | e702d106b6dd4999ed536f77347ca84675be3716 | 10,106 |
import random
def generate_name(style: str = 'underscore', seed: int = None) -> str:
"""Generate a random name."""
if seed is not None:
random.seed(seed)
return format_names(random_names(), style=style) | 2f74460f5492c3b4788800d6e33a44b856df91aa | 10,107 |
def argunique(a, b):
"""
Find the unique correspondences among the a--b pairs, i.e. guarantee that the final output aa--bb contains no duplicate elements and no one-to-many mappings.
:param a:
:param b:
:return: aaa, bbb such that aaa--bbb are unique pairs
"""
# Check each element of a one by one: on its first occurrence record its partner; on later occurrences, if the partner differs, set it to -1
# A value of -1 marks the current element i of a as having a one-to-many record, so it is dropped; -1 can also never be matched again
seta = {}
for i, j in zip(a, b):
if i not in seta:
seta[i] = j
elif seta[i] != j:
seta[i] = -1
aa = [i for i in seta if seta[i] != -1]
bb = [seta[i] for i in seta if seta[i] != -1]
# Do the same pass in reverse, indexed by b, to remove duplicates
setb = {}
for i, j in zip(aa, bb):
if j not in setb:
setb[j] = i
elif setb[j] != i:
setb[j] = -1
aaa = [setb[j] for j in setb if setb[j] != -1]
bbb = [j for j in setb if setb[j] != -1]
return aaa, bbb | e804436203496d5f3109511967a0d75eaca330da | 10,108 |
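A small illustrative call of argunique (the values are made up): element 3 of a maps to both 6 and 7, so it is dropped, leaving only the unambiguous pairs.
a = [1, 1, 2, 3, 3]
b = [4, 4, 5, 6, 7]
aaa, bbb = argunique(a, b)
print(aaa, bbb)  # [1, 2] [4, 5]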
def move(obj, direction):
"""
Moves object by (dx, dy).
Returns true if move succeeded.
"""
goal = obj.pos + direction
if (goal.x < 0 or goal.y < 0 or
goal.x >= obj.current_map.width or
goal.y >= obj.current_map.height):
# try_ catches this for the player, but need to
# check here for NPCs
return False
if not obj.current_map.is_blocked_from(obj.pos, goal):
obj.pos = goal
if obj.fighter:
obj.fighter.exhaustion += MOVE_EXHAUSTION
return True
return False | 23917e448ed953acb2bb864d7a14d01c72f07a85 | 10,109 |
def addMedicine(medicine: object):
"""Data required are "name", "description", "price", "quantity", "medicalId" """
return mr.makePostRequest(mr.API + "/medicine/", medicine) | f488ecd16d6e2986944ae26e5776ca9f9be7e170 | 10,110 |
from benchbuild.utils.db import create_run
from benchbuild.utils import schema as s
from benchbuild.settings import CFG
from datetime import datetime
def begin(command, project, ename, group):
"""
Begin a run in the database log.
Args:
command: The command that will be executed.
pname: The project name we belong to.
ename: The experiment name we belong to.
group: The run group we belong to.
Returns:
(run, session), where run is the generated run instance and session the
associated transaction for later use.
"""
db_run, session = create_run(command, project, ename, group)
db_run.begin = datetime.now()
db_run.status = 'running'
log = s.RunLog()
log.run_id = db_run.id
log.begin = datetime.now()
log.config = repr(CFG)
session.add(log)
session.commit()
return db_run, session | a33a5e809b20b6d1f92545bd0df5de9fbc230f91 | 10,111 |
def fortran_library_item(lib_name,
sources,
**attrs
): #obsolete feature
""" Helper function for creating fortran_libraries items. """
build_info = {'sources':sources}
known_attrs = ['module_files','module_dirs',
'libraries','library_dirs']
for key,value in attrs.items():
if key not in known_attrs:
raise TypeError,\
"fortran_library_item() got an unexpected keyword "\
"argument '%s'" % key
build_info[key] = value
return (lib_name,build_info) | 720802933b9ebcaab566f3deeb063341b85dba7e | 10,112 |
def copy_generator(generator):
"""Copy an existing numpy (random number) generator.
Parameters
----------
generator : numpy.random.Generator or numpy.random.RandomState
The generator to copy.
Returns
-------
numpy.random.Generator or numpy.random.RandomState
In numpy <=1.16 a ``RandomState``, in 1.17+ a ``Generator``.
Both are copies of the input argument.
"""
if isinstance(generator, np.random.RandomState):
return _copy_generator_np116(generator)
return _copy_generator_np117(generator) | 57f5c3b9ad934330b1eedb6460943204f97b9436 | 10,113 |
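A brief usage sketch, assuming numpy and the private _copy_generator_np116/_copy_generator_np117 helpers the function dispatches to (they are not shown in this entry); the copy is independent, so drawing from it does not advance the original:
import numpy as np

rng = np.random.RandomState(42)
rng_copy = copy_generator(rng)
a = rng.random_sample(3)
b = rng_copy.random_sample(3)
assert np.allclose(a, b)  # both generators started from the same internal state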
def test_pages_kingdom_successful(args, protein_gen_success, cazy_home_url, monkeypatch):
"""Test parse_family_by_kingdom() when all is successful."""
test_fam = Family("famName", "CAZyClass", "http://www.cazy.org/GH14.html")
def mock_get_pag(*args, **kwargs):
return ["http://www.cazy.org/GH14_all.html"]
def mock_get_pages(*args, **kwargs):
return protein_gen_success
monkeypatch.setattr(get_cazy_pages, "get_pagination_pages_kingdom", mock_get_pag)
monkeypatch.setattr(get_cazy_pages, "get_html_page", mock_get_pages)
get_cazy_pages.parse_family_by_kingdom(
family=test_fam,
cazy_home=cazy_home_url,
args=args["args"],
kingdoms=["Bacteria"],
) | b5fdbeac6a4a54170c17a98689ae5cc6bbca9542 | 10,116 |
def _truncate_and_pad_token_ids(token_ids, max_length):
"""Truncates or pads the token id list to max length."""
token_ids = token_ids[:max_length]
padding_size = max_length - len(token_ids)
if padding_size > 0:
token_ids += [0] * padding_size
return token_ids | a8f29fdbc99c3dcac42b9275037d3a3c39c22e12 | 10,117 |
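Two illustrative calls of the helper above (token ids are made up), showing the padding branch and the truncation branch:
print(_truncate_and_pad_token_ids([5, 3, 8], max_length=5))     # [5, 3, 8, 0, 0]
print(_truncate_and_pad_token_ids([5, 3, 8, 2], max_length=2))  # [5, 3]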
def build_bundletoperfectsensor_pipeline(pan_img, ms_img):
"""
This function builds the a pipeline that performs P+XS pansharpening
:param pan_img: Path to the panchromatic image
:type pan_img: string
:param ms_img: Path to the multispectral image
:type ms_img: string
:returns: resample_image
:rtype: otb application
"""
pansharpening_app = otbApplication.Registry.CreateApplication(
"BundleToPerfectSensor"
)
pansharpening_app.SetParameterString("inp", pan_img)
pansharpening_app.SetParameterString("inxs", ms_img)
pansharpening_app.Execute()
return pansharpening_app | f40aa0828ef50ef8f81f901f93dbaf8690a14d4f | 10,118 |
def get_argparser_ctor_args():
"""
This method returns a dict containing the kwargs for constructing an
argparse.ArgumentParser (either directly or as a subparser).
"""
return {
'prog': 'CodeChecker store',
'formatter_class': arg.RawDescriptionDefaultHelpFormatter,
# Description is shown when the command's help is queried directly
'description': """
Store the results from one or more 'codechecker-analyze' result files in a
database.""",
# Epilogue is shown after the arguments when the help is queried
# directly.
'epilog': """
Environment variables
------------------------------------------------
CC_PASS_FILE The location of the password file for auto login. By default
CodeChecker will use '~/.codechecker.passwords.json' file.
It can also be used to setup different credential files to
login to the same server with a different user.
CC_SESSION_FILE The location of the session file where valid sessions are
stored. This file will be automatically created by
CodeChecker. By default CodeChecker will use
'~/.codechecker.session.json'. This can be used if
restrictive permissions forbid CodeChecker from creating
files in the users home directory (e.g. in a CI
environment).
The results can be viewed by connecting to such a server in a Web browser or
via 'CodeChecker cmd'.""",
# Help is shown when the "parent" CodeChecker command lists the
# individual subcommands.
'help': "Save analysis results to a database."
} | 9debf6233652052782295aeb5b630ee2b4b3b19e | 10,119 |
def castep_geom_count(dot_castep):
"""Count the number of geom cycles"""
count = 0
with open(dot_castep) as fhandle:
for line in fhandle:
if 'starting iteration' in line:
count += 1
return count | 6a619b5853a02a8c118af1fc19da0d803941c84f | 10,120 |
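A small self-contained check; the file contents below are made up to mimic the 'starting iteration' lines of a .castep run.
import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.castep', delete=False) as tmp:
    tmp.write("... starting iteration 1 ...\n")   # counted
    tmp.write("some other output line\n")          # ignored
    tmp.write("... starting iteration 2 ...\n")   # counted
    path = tmp.name
print(castep_geom_count(path))  # 2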
def nav_login(request, text="Login", button=False):
"""Navigation login button
Args:
request (Request): Request object submitted by template
text (str, optional): Text to be shown in button. Defaults to "Login".
button (bool, optional): Is this to be styled as a button or as a link. Defaults to False.
Returns:
SafeText: HTML form
"""
url = reverse("login")
return nav_next(request, url, text, button) | ddbc3de38c47425ec9f095577d178c068cce74c2 | 10,121 |
def parse_adapter(name: str, raw: dict) -> dict:
"""Parse a single adapter."""
parsed = {
"name": strip_right(obj=name, fix="_adapter"),
"name_raw": name,
"name_plugin": raw["unique_plugin_name"],
"node_name": raw["node_name"],
"node_id": raw["node_id"],
"status": raw["status"],
"features": raw["supported_features"],
}
generic_name = GENERIC_NAME
discovery_name = DISCOVERY_NAME
specific_name = get_specific_name(raw=raw)
config = raw["config"]
specific_schema = config.get(specific_name, {}).get("schema", {})
specific_schema = parse_schema(raw=specific_schema)
generic_schema = config[generic_name]["schema"]
generic_schema = parse_schema(raw=generic_schema)
discovery_schema = config[discovery_name]["schema"]
discovery_schema = parse_schema(raw=discovery_schema)
cnx_schema = parse_schema(raw=raw["schema"])
cnx_schema["connection_label"] = {
"name": "connection_label",
"title": "Connection Label",
"type": "string",
"required": False,
}
parsed["schemas"] = {
"cnx": cnx_schema,
"specific": specific_schema,
"generic": generic_schema,
"discovery": discovery_schema,
"generic_name": generic_name,
"specific_name": specific_name,
"discovery_name": discovery_name,
}
parsed["config"] = {
"specific": raw["config"].get(specific_name, {}).get("config", {}),
"generic": raw["config"].get(generic_name, {}).get("config", {}),
"discovery": raw["config"].get(discovery_name, {}).get("config", {}),
}
parsed["cnx"] = parse_cnx(raw=raw, parsed=parsed)
parsed["cnx_count_total"] = len(parsed["cnx"])
parsed["cnx_count_broken"] = len([x for x in parsed["cnx"] if not x["working"]])
parsed["cnx_count_working"] = len([x for x in parsed["cnx"] if x["working"]])
return parsed | 085b8a38561d6ffda12ca27d2f2089759b34e1ed | 10,122 |
def export_phones(ucm_axl):
"""
Export Phones
"""
try:
phone_list = ucm_axl.get_phones(
tagfilter={
"name": "",
"description": "",
"product": "",
"model": "",
"class": "",
"protocol": "",
"protocolSide": "",
"callingSearchSpaceName": "",
"devicePoolName": "",
"commonDeviceConfigName": "",
"commonPhoneConfigName": "",
"networkLocation": "",
"locationName": "",
"mediaResourceListName": "",
"networkHoldMohAudioSourceId": "",
"userHoldMohAudioSourceId": "",
"loadInformation": "",
"securityProfileName": "",
"sipProfileName": "",
"cgpnTransformationCssName": "",
"useDevicePoolCgpnTransformCss": "",
"numberOfButtons": "",
"phoneTemplateName": "",
"primaryPhoneName": "",
"loginUserId": "",
"defaultProfileName": "",
"enableExtensionMobility": "",
"currentProfileName": "",
"loginTime": "",
"loginDuration": "",
# "currentConfig": "",
"ownerUserName": "",
"subscribeCallingSearchSpaceName": "",
"rerouteCallingSearchSpaceName": "",
"allowCtiControlFlag": "",
"alwaysUsePrimeLine": "",
"alwaysUsePrimeLineForVoiceMessage": "",
}
)
all_phones = []
for phone in phone_list:
# print(phone)
phone_details = {
"name": phone.name,
"description": phone.description,
"product": phone.product,
"model": phone.model,
"protocol": phone.protocol,
"protocolSide": phone.protocolSide,
"callingSearchSpaceName": phone.callingSearchSpaceName._value_1,
"devicePoolName": phone.defaultProfileName._value_1,
"commonDeviceConfigName": phone.commonDeviceConfigName._value_1,
"commonPhoneConfigName": phone.commonPhoneConfigName._value_1,
"networkLocation": phone.networkLocation,
"locationName": phone.locationName._value_1,
"mediaResourceListName": phone.mediaResourceListName._value_1,
"networkHoldMohAudioSourceId": phone.networkHoldMohAudioSourceId,
"userHoldMohAudioSourceId": phone.userHoldMohAudioSourceId,
"loadInformation": phone.loadInformation,
"securityProfileName": phone.securityProfileName._value_1,
"sipProfileName": phone.sipProfileName._value_1,
"cgpnTransformationCssName": phone.cgpnTransformationCssName._value_1,
"useDevicePoolCgpnTransformCss": phone.useDevicePoolCgpnTransformCss,
"numberOfButtons": phone.numberOfButtons,
"phoneTemplateName": phone.phoneTemplateName._value_1,
"primaryPhoneName": phone.primaryPhoneName._value_1,
"loginUserId": phone.loginUserId,
"defaultProfileName": phone.defaultProfileName._value_1,
"enableExtensionMobility": phone.enableExtensionMobility,
"currentProfileName": phone.currentProfileName._value_1,
"loginTime": phone.loginTime,
"loginDuration": phone.loginDuration,
# "currentConfig": phone.currentConfig,
"ownerUserName": phone.ownerUserName._value_1,
"subscribeCallingSearchSpaceName": phone.subscribeCallingSearchSpaceName._value_1,
"rerouteCallingSearchSpaceName": phone.rerouteCallingSearchSpaceName._value_1,
"allowCtiControlFlag": phone.allowCtiControlFlag,
"alwaysUsePrimeLine": phone.alwaysUsePrimeLine,
"alwaysUsePrimeLineForVoiceMessage": phone.alwaysUsePrimeLineForVoiceMessage,
}
line_details = ucm_axl.get_phone(name=phone.name)
# print(line_details.lines.line)
try:
for line in line_details.lines.line:
# print(line)
phone_details[f"line_{line.index}_dirn"] = line.dirn.pattern
phone_details[f"line_{line.index}_routePartitionName"] = line.dirn.routePartitionName._value_1
phone_details[f"line_{line.index}_display"] = line.display
phone_details[f"line_{line.index}_e164Mask"] = line.e164Mask
except Exception as e:
print(e)
all_phones.append(phone_details)
print(
f"exporting: {phone.name}: {phone.model} - {phone.description}")
print("-" * 35)
print(f"number of phones: {len(all_phones)}")
return all_phones
except Exception as e:
print(e)
return [] | 1487cef48c5666224da57173b968e9988f587a57 | 10,123 |
def is_various_artists(name, mbid):
"""Check if given name or mbid represents 'Various Artists'."""
return name and VA_PAT.match(name) or mbid == VA_MBID | 084f1d88b99ec7f5b6eac0774a05e901bd701603 | 10,124 |
def validate_ruletype(t):
"""Validate *bounds rule types."""
if t not in ["typebounds"]:
raise exception.InvalidBoundsType("{0} is not a valid *bounds rule type.".format(t))
return t | a8ae173f768837cdc35d1a8f6429614b58a74988 | 10,125 |
import elftools.elf.constants
def decode_section_flags(sflags: str) -> int:
"""Map readelf's representation of section flags to ELF flag values."""
d = {
'W': elftools.elf.constants.SH_FLAGS.SHF_WRITE,
'A': elftools.elf.constants.SH_FLAGS.SHF_ALLOC,
'X': elftools.elf.constants.SH_FLAGS.SHF_EXECINSTR,
'M': elftools.elf.constants.SH_FLAGS.SHF_MERGE,
'S': elftools.elf.constants.SH_FLAGS.SHF_STRINGS,
'I': elftools.elf.constants.SH_FLAGS.SHF_INFO_LINK,
'L': elftools.elf.constants.SH_FLAGS.SHF_LINK_ORDER,
'O': elftools.elf.constants.SH_FLAGS.SHF_OS_NONCONFORMING,
'G': elftools.elf.constants.SH_FLAGS.SHF_GROUP,
'T': elftools.elf.constants.SH_FLAGS.SHF_TLS,
'C': 0x800, # SHF_COMPRESSED
'E': elftools.elf.constants.SH_FLAGS.SHF_EXCLUDE,
'y': 0x20000000, # SHF_ARM_PURECODE
}
flags = 0
for k, v in d.items():
if k in sflags:
flags |= v
return flags | e007f1f370f6203bafe92a1a6422100f2d9626ae | 10,127 |
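An illustrative check, assuming pyelftools is installed (the function above already depends on it): readelf flags "AX" should decode to SHF_ALLOC | SHF_EXECINSTR.
from elftools.elf.constants import SH_FLAGS
flags = decode_section_flags('AX')
assert flags == (SH_FLAGS.SHF_ALLOC | SH_FLAGS.SHF_EXECINSTR)
print(hex(flags))  # 0x6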
def nCr(n,r):
"""
Implements multiplicative formula:
https://en.wikipedia.org/wiki/Binomial_coefficient#Multiplicative_formula
"""
if r < 0 or r > n:
return 0
if r == 0 or r == n:
return 1
c = 1
    for i in range(min(r, n - r)):
c = c * (n - i) // (i + 1)
return c | 8c0dc30b4cdab47c99bf98459e435147ac0b92fd | 10,128 |
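A worked example of the multiplicative formula for C(5, 2), plus the edge cases handled above.
# i = 0: c = 1 * (5 - 0) // 1 = 5
# i = 1: c = 5 * (5 - 1) // 2 = 10
print(nCr(5, 2))   # 10
print(nCr(10, 0))  # 1  (r == 0)
print(nCr(4, 7))   # 0  (r > n)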
import inspect
def _get_init_arguments(cls, *args, **kwargs):
"""Returns an OrderedDict of args passed to cls.__init__ given [kw]args."""
init_args = inspect.signature(cls.__init__)
bound_args = init_args.bind(None, *args, **kwargs)
bound_args.apply_defaults()
arg_dict = bound_args.arguments
del arg_dict['self']
return arg_dict | 116c01f9edb838e4b392fa624a454fdf4c455f1a | 10,130 |
def MatchCapture(nfa: NFA, id: CaptureGroup) -> NFA:
"""Handles: (?<id>A)"""
captures = {(s, i): {id} for (s, i) in nfa.transitions if i != Move.EMPTY}
return NFA(nfa.start, nfa.end, nfa.transitions, merge_trans(nfa.captures, captures)) | 08805d01be73480cfea4d627c6a67969290c1d11 | 10,131 |
def get_all_state_events(log):
    """ Returns a list of InternalEvent objects (event id, state_change_id, block_number and event data)"""
    return [
        InternalEvent(res[0], res[1], res[2], log.serializer.deserialize(res[3]))
        for res in get_db_state_changes(log.storage, 'state_events')
] | c75307c930add3e142996e19c441c84fd663e36a | 10,133 |
from nnf import NNF, And, Or
def iff(a: NNF, b: NNF) -> Or[And[NNF]]:
"""``a`` is true if and only if ``b`` is true."""
return (a & b) | (a.negate() & b.negate()) | 82ea5bfe9c4e1f79361319b2d8455cba898e77ec | 10,134 |
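A quick truth-table check, assuming the python-nnf package (which provides Var and the operators used above) is installed.
from nnf import Var
a, b = Var('a'), Var('b')
equiv = iff(a, b)
# Satisfied exactly when a and b have the same truth value.
print(equiv.satisfied_by({'a': True, 'b': True}))   # True
print(equiv.satisfied_by({'a': True, 'b': False}))  # False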
def redact_access_token(e: Exception) -> Exception:
"""Remove access token from exception message."""
if not isinstance(e, FacebookError):
return e
e.args = (redact_access_token_from_str(str(e.args[0])),)
return e | 63d7a7422cb7315866e9c25552fa96b403673261 | 10,135 |
def _get_ext_comm_subtype(type_high):
"""
Returns a ByteEnumField with the right sub-types dict for a given community.
http://www.iana.org/assignments/bgp-extended-communities/bgp-extended-communities.xhtml
"""
return _ext_comm_subtypes_classes.get(type_high, {}) | 5b5782659f1d261162d8f9d5becfe65b852f3bdc | 10,136 |
import click
def _filter_classes(classes, filters, names_only, iq):
"""
    Filter a list of classes for the qualifiers defined by the
    qualifier_filter parameter, where this parameter is a list of tuples.
    Each tuple contains the qualifier name and a dictionary with the
    qualifier name as key and a tuple containing the option_value (True or
    False) and a list of booleans, where each boolean represents one of the
    scope types (class, property, method, parameter) and indicates whether
    to display the class if the qualifier exists in that scope.
    This method only works for boolean qualifiers.
Parameters:
classes (list of :class:`~pywbem.CIMClass`):
list of classes to be filtered
qualifier_filters (dict):
Dictionary defining the filtering to be performed. It contains an entry
for each qualifier filter that is defined. See _build_qualifier_filters
for a definition of this list.
names_only (:class:`py:bool`):
If True, return only the classnames. Otherwise returns the filtered
classes. This is because we must get the classes from the server to
perform the filtering
iq (:class:`py:bool`):
If not True, remove any qualifiers from the classes. This is because
we must get the classes from the server with qualifiers to
perform the filtering.
"""
def class_has_qualifier(cls, qname, scopes):
"""
Determine if the qualifier defined by qname exists in the elements
of the class where the elements are defined by the scopes parameter
for this filter.
Parameters:
cls (:class:`~pywbem.CIMClass`):
The class to be inspected for the qualifier defined by qname
qname (:term:`string`):
The qualifier for which we are searching
scopes (tuple of booleans):
A tuple containing a boolean value for each of the possible scopes
(class, property, method, parameter)
Returns:
True if the qualifier with name quname is found in the elements where
the scope is True. Otherwise, False is returned
"""
# Test class scope
if scopes[0] and qname in cls.qualifiers:
return True
# if property scope, test properties
if scopes[1]:
for prop in cls.properties.values():
if qname in prop.qualifiers:
return True
# If method scope, test methods and if parameter scope, test parameters
if scopes[2]:
for method in cls.methods.values():
if qname in method.qualifiers:
return True
if scopes[3]:
params = method.parameters
for param in params.values():
if qname in param.qualifiers:
return True
return False
# Test all classes in the input property for the defined filters.
filtered_classes = []
subclass_names = []
# Build list of subclass names that will be used later as a filter on the
# classes to be returned
if 'subclass_of' in filters:
try:
subclass_names = get_subclass_names(
classes,
classname=filters['subclass_of'].optionvalue,
deep_inheritance=True)
except ValueError:
raise click.ClickException(
'Classname {} for "subclass-of" not found in returned classes.'
.format(filters['subclass_of'].optionvalue))
# Build a list of leaf class names that will be used later as a filter on
# the classes to be returned.
if 'leaf_classes' in filters:
try:
if subclass_names:
clsx = [cls for cls in classes if cls.classname in
subclass_names]
leafclass_names = get_leafclass_names(clsx)
else:
leafclass_names = get_leafclass_names(classes)
except ValueError:
raise click.ClickException(
'Classname {} for "leaf_classes-of" not found in returned '
'classes.'.format(filters['leaf_classes'].optionvalue))
for cls in classes:
show_class_list = []
for filter_name, filter_ in filters.items():
if filter_name == 'qualifier':
option_value = filter_.optionvalue
if class_has_qualifier(cls, filter_.qualifiername,
filter_.scopes):
if filter_.qualifiername == 'version':
if filter_.qualifiername in cls.qualifiers:
cls_version = \
cls.qualifiers[filter_.qualifiername].value
val = parse_version_value(cls_version,
cls.classname)
option_value = bool(val >= filter_.optionvalue)
show_class_list.append(option_value)
else:
show_class_list.append(not option_value)
elif filter_name == 'schema':
show_class_list.append(
cls.classname.lower().startswith(filter_.optionvalue))
elif filter_name == 'subclass_of':
show_class_list.append(cls.classname in subclass_names)
elif filter_name == 'leaf_classes':
show_class_list.append(cls.classname in leafclass_names)
else:
assert False # Future for other test_types
# Show if all options are True for this class
show_this_class = all(show_class_list)
if show_this_class:
# If returning instances, honor the names_only option
if not names_only and not iq:
cls.qualifiers = []
for p in cls.properties.values():
p.qualifiers = []
for m in cls.methods.values():
m.qualifiers = []
for p in m.parameters.values():
p.qualifiers = []
filtered_classes.append(cls)
# If names_only parameter create list of classnames
if names_only:
filtered_classes = [cls.classname for cls in filtered_classes]
return filtered_classes | eecee9f5a1ccf6c793000faf11cd0f666a0c0f7b | 10,137 |
def template2():
    """load_organic_density"""
script = """
## (Store,figure)
<< host = chemml
<< function = SavePlot
<< kwargs = {'normed':True}
<< output_directory = plots
<< filename = amwVSdensity
>> 0 fig
## (Visualize,artist)
<< host = chemml
<< function = decorator
<< title = AMW vs. Density
<< grid_color = g
<< xlabel = density (Kg/m3)
<< ylabel = atomic molecular weight
<< grid = True
<< size = 18
>> fig 0
>> 4 fig
## (Enter,python script)
<< host = chemml
<< function = PyScript
<< line01 = print (iv1.head())
>> 1 iv1
## (Enter,datasets)
<< host = chemml
<< function = load_organic_density
>> smiles 1
>> density 2
>> features 3
## (Visualize,plot)
<< host = chemml
<< function = scatter2D
<< y = 0
<< marker = o
<< x = 'AMW'
>> 2 dfy
>> 3 dfx
>> fig 4
"""
return script.strip().split('\n') | 2d6dfbab0ef3093645b67da756491fd1b1639649 | 10,138 |
from typing import Tuple
from typing import Any
import cudf
import cupy as cp
def get_target_and_encoder_gpu(train: GpuDataset) -> Tuple[Any, type]:
"""Get target encoder and target based on dataset.
Args:
train: Dataset.
Returns:
(Target values, Target encoder).
"""
target = train.target
if isinstance(target, cudf.Series):
target = target.values
target_name = train.target.name
if train.task.name == 'multiclass':
n_out = cp.max(target)+1
target = (target[:, cp.newaxis] == cp.arange(n_out)[cp.newaxis, :])
encoder = MultiClassTargetEncoder_gpu
else:
encoder = TargetEncoder_gpu
return target, encoder | 64cfee3ec9c58bf07d9eb28977b4e5cb7ebadc80 | 10,139 |
import functools
MISSING = object()  # sentinel: "result not computed yet" (assumed module-level constant)
def once(f):
"""Cache result of a function first call"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
rv = getattr(f, 'rv', MISSING)
if rv is MISSING:
f.rv = f(*args, **kwargs)
return f.rv
return wrapper | 25d096f76d156c7a8a26f8f159b65b9b31c8d927 | 10,141 |
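Illustrative use of the decorator above; the decorated function is a made-up stand-in for an expensive computation.
@once
def expensive_setup():
    print("computing...")
    return 42
print(expensive_setup())  # prints "computing..." then 42
print(expensive_setup())  # prints 42 only; the cached result is reused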
import torch
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_config(args, cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.MODEL.BUA.EXTRACTOR.MODE = 1
default_setup(cfg, args)
cfg.MODEL.DEVICE = 'cuda:0' if torch.cuda.is_available() else 'cpu'
cfg.freeze()
return cfg | 76a8a21714a1fed96fe7b6f33cb948aee30bffcf | 10,142 |
def build_audit_stub(obj):
"""Returns a stub of audit model to which assessment is related to."""
audit_id = obj.audit_id
if audit_id is None:
return None
return {
'type': 'Audit',
'id': audit_id,
'context_id': obj.context_id,
'href': '/api/audits/%d' % audit_id,
'issue_tracker': obj.audit.issue_tracker,
} | 705f066975bf9dae8704944c71eeb3e313cf445f | 10,143 |
import numpy as np
def calculate_widths(threshold_img, landmarks):
    """
    Computes the width of the blood vessels at the potential bifurcation
    points. The width is obtained by taking the smallest distance travelled
    from the point along each of the directions (8 directions are used).
    The function returns the equivalent of the vessel diameter at each
    point.
    :param threshold_img: (binary) image used to compute the blood vessel
    widths
    :param landmarks: points at which to compute the widths
    :return: list with the width of each point (vessel diameter)
    """
N, M = threshold_img.shape
widths = []
for x, y, mark_type in landmarks:
# down
i = x
j = y
vert_dist = 0
while(j < M and threshold_img[i, j] != 0):
vert_dist += 1
j += 1
# up
i = x
j = y
while(j >= 0 and threshold_img[i, j] != 0):
vert_dist += 1
j -= 1
# right
horiz_dist = 0
i = x
j = y
while(i < N and threshold_img[i, j] != 0):
horiz_dist += 1
i += 1
# left
i = x
j = y
while(i >= 0 and threshold_img[i, j] != 0):
horiz_dist += 1
i -= 1
# down right
i = x
j = y
s_diag_dist = 0
while(i < N and j < M and threshold_img[i, j] != 0):
i += 1
j += 1
s_diag_dist += 1
# up left
i = x
j = y
while(i >= 0 and j >= 0 and threshold_img[i, j] != 0):
i -= 1
j -= 1
s_diag_dist += 1
# down left
i = x
j = y
p_diag_dist = 0
while(i >= 0 and j < M and threshold_img[i, j] != 0):
i -= 1
j += 1
p_diag_dist += 1
# up right
i = x
j = y
while(i < N and j >= 0 and threshold_img[i, j] != 0):
i += 1
j -= 1
p_diag_dist += 1
min_width = np.min([vert_dist, horiz_dist, p_diag_dist, s_diag_dist])
widths.append([(x, y), np.ceil(min_width).astype(int), mark_type])
return widths | 304ce6bec19faba0a0520b63435fcbf66f8989f0 | 10,144 |
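A tiny synthetic check of the width computation above; the image and landmark below are made up, and the mark_type value is only illustrative.
import numpy as np
img = np.zeros((20, 20), dtype=np.uint8)
img[5:12, :] = 1                       # 7-pixel-thick band of ones along axis 0
landmarks = [(8, 10, 'bifurcation')]   # point inside the band, (x, y, mark_type)
print(calculate_widths(img, landmarks))
# -> [[(8, 10), 8, 'bifurcation']]  (the centre pixel is counted in both traversal directions)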
from typing import Optional
import numpy as np
from rdkit import Chem, DataStructs
from rdkit.Chem import rdMolDescriptors as rdmd
def smi_to_fp(smi: str, fingerprint: str,
radius: int = 2, length: int = 2048) -> Optional[np.ndarray]:
"""fingerprint functions must be wrapped in a static function
so that they may be pickled for parallel processing
Parameters
----------
smi : str
the SMILES string of the molecule to encode
fingerprint : str
the the type of fingerprint to generate
radius : int
the radius of the fingerprint
length : int
the length of the fingerprint
Returns
-------
T_comp
the compressed feature representation of the molecule
"""
mol = Chem.MolFromSmiles(smi)
if mol is None:
return None
if fingerprint == 'morgan':
fp = rdmd.GetMorganFingerprintAsBitVect(
mol, radius=radius, nBits=length, useChirality=True)
elif fingerprint == 'pair':
fp = rdmd.GetHashedAtomPairFingerprintAsBitVect(
mol, minLength=1, maxLength=1+radius, nBits=length)
elif fingerprint == 'rdkit':
fp = rdmd.RDKFingerprint(
mol, minPath=1, maxPath=1+radius, fpSize=length)
elif fingerprint == 'maccs':
fp = rdmd.GetMACCSKeysFingerprint(mol)
else:
raise NotImplementedError(
f'Unrecognized fingerprint: "{fingerprint}"')
x = np.empty(len(fp))
DataStructs.ConvertToNumpyArray(fp, x)
return x | fa768c5b53a4a1b637b1928127ef85506d375fd7 | 10,145 |
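Illustrative calls, assuming RDKit is installed: ethanol encoded as a 2048-bit Morgan fingerprint, and an invalid SMILES returning None.
fp = smi_to_fp('CCO', 'morgan')
print(None if fp is None else fp.shape)     # (2048,)
print(smi_to_fp('not-a-smiles', 'morgan'))  # None (RDKit fails to parse the SMILES)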
def f(x, t):
"""function to learn."""
return tf.square(tf.cast(t, tf.float32) / FLAGS.tm) * (tf.math.sin(5 * x) + 1) | 9138b7a2acf43a1c62d5da8157725ff10e6f7f78 | 10,146 |
def render_cells(cells, width=80, col_spacing=2):
"""Given a list of short (~10 char) strings, display these aligned in
columns.
Example output::
Something like this can be
used to neatly arrange long
sequences of values in a
compact format.
Parameters
----------
cells : [(strlen, str), ...]
Gives the cells to print as tuples giving the strings length in visible
characters and the string to display.
width : int
The width of the terminal.
col_spacing : int
Size of the gap to leave between columns.
"""
# Special case (since max below will fail)
if len(cells) == 0:
return ""
# Columns should be at least as large as the largest cell with padding
# between columns
col_width = max(strlen for strlen, s in cells) + col_spacing
lines = [""]
cur_length = 0
for strlen, s in cells:
# Once line is full, move to the next
if cur_length + strlen > width:
lines.append("")
cur_length = 0
# Add the current cell (with spacing)
lines[-1] += s + (" "*(col_width - strlen))
cur_length += col_width
return "\n".join(map(str.rstrip, lines)) | 714b915430be84980c3a9b74f3c5b2cb89b6acba | 10,147 |
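A small usage example with made-up labels and a narrow terminal width.
words = ["alpha", "beta", "gamma", "delta", "epsilon", "zeta"]
cells = [(len(w), w) for w in words]     # (visible length, string) pairs
print(render_cells(cells, width=30))     # labels aligned into columns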
def separate_types(data):
"""Separate out the points from the linestrings."""
if data['type'] != 'FeatureCollection':
raise TypeError('expected a FeatureCollection, not ' + data['type'])
points = []
linestrings = []
for thing in data['features']:
if thing['type'] != 'Feature':
raise TypeError('expected Feature, not ' + thing['type'])
geometry_type = thing['geometry']['type']
if geometry_type == 'Point':
points.append(thing)
elif geometry_type == 'LineString':
linestrings.append(thing)
else:
raise TypeError('expected Point or LineString, not ' + geometry_type)
return points, linestrings | 28ab8eb7e2cdf1206f4908a15506a9b9af1aa428 | 10,148 |
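A minimal usage example with a made-up GeoJSON-style FeatureCollection.
data = {
    "type": "FeatureCollection",
    "features": [
        {"type": "Feature", "geometry": {"type": "Point", "coordinates": [0, 0]}},
        {"type": "Feature", "geometry": {"type": "LineString",
                                         "coordinates": [[0, 0], [1, 1]]}},
    ],
}
points, linestrings = separate_types(data)
print(len(points), len(linestrings))  # 1 1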