content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
def network_instance_create(network, host, attrs=None):
"""
    Creates a network_instance of the given kind on the given host, configuring it with the given attributes.
    Parameter *network*:
        The parameter *network* must be a string identifying one of the supported
        network_instance kinds.
Parameter *host*:
The parameter *host* must be a string giving a host for the network_instance.
Parameter *attrs*:
The attributes of the network_instance can be given as the parameter *attrs*.
This parameter must be a dict of attributes if given. Attributes can
later be changed using :py:func:`network_instance_modify`.
Return value:
The return value of this method is the info dict of the new network_instance as
returned by :py:func:`resource_info`.
"""
if not attrs: attrs = {}
attrs = dict(attrs)
attrs.update(host=host, network=network)
res = NetworkInstance.create(attrs)
return res.info() | 23efa27090081bc59f917cdcf7497f75be0f93b4 | 11,423 |
def update_get():
"""Fetches the state of the latest update job.
Returns:
On success, a JSON data structure with the following properties:
status: str describing the status of the job. Can be one of
["NOT_RUNNING", "DONE", "IN_PROGRESS"].
Example:
{
"status": "NOT_RUNNING"
}
Returns error object on failure.
"""
status, error = update.status.get()
if error:
return json_response.error(error), 500
return json_response.success({'status': str(status)}) | 9e1a2438fc8b4d1dd1bd1354d478c4d4e3e58098 | 11,424 |
def create_github_url(metadata, is_file=False):
"""Constrói a URL da API
Constrói a URL base da API do github a partir
dos dados presentes no metadata.
Args:
metadata: JSON com informações acerca do dataset.
is_file: FLAG usada pra sinalizar se o dataset é apenas um elemento.
"""
url_params = metadata['url'].split('/')
server_idx = url_params.index('github.com')
username = url_params[server_idx + 1]
repo = url_params[server_idx + 2]
data_path = metadata['path']
return ("https://raw.githubusercontent.com/{}/{}/master{}" if is_file else "https://api.github.com/repos/{}/{}/contents{}").format(username, repo, data_path) | 6c138d92cd7b76f87c225a1fd98e7d397b0d6d28 | 11,425 |
def kge_2012(obs, sim, missing="drop", weighted=False, max_gap=30):
"""Compute the (weighted) Kling-Gupta Efficiency (KGE).
Parameters
----------
sim: pandas.Series
Series with the simulated values.
obs: pandas.Series
Series with the observed values.
missing: str, optional
string with the rule to deal with missing values. Only "drop" is
supported now.
weighted: bool, optional
Weight the values by the normalized time step to account for
irregular time series. Default is False.
max_gap: int, optional
maximum allowed gap period in days to use for the computation of the
        weights. All time steps larger than max_gap are replaced with the
max_gap value. Default value is 30 days.
Notes
-----
The (weighted) Kling-Gupta Efficiency [kling_2012]_ is computed as follows:
    .. math:: \\text{KGE} = 1 - \\sqrt{(r-1)^2 + (\\beta-1)^2 + (\\gamma-1)^2}
where :math:`\\beta = \\bar{x} / \\bar{y}` and :math:`\\gamma =
\\frac{\\bar{\\sigma}_x / \\bar{x}}{\\bar{\\sigma}_y / \\bar{y}}`. If
    weighted equals True, the weighted mean, variance and Pearson
correlation are used.
References
----------
.. [kling_2012] Kling, H., Fuchs, M., and Paulin, M. (2012). Runoff
conditions in the upper Danube basin under an ensemble of climate
change scenarios. Journal of Hydrology, 424-425:264 - 277.
"""
if missing == "drop":
obs = obs.dropna()
sim = sim.reindex(obs.index).dropna()
# Return nan if the time indices of the sim and obs don't match
if sim.index.size == 0:
logger.warning("Time indices of the sim and obs don't match.")
return nan
r = pearsonr(obs=obs, sim=sim, weighted=weighted, max_gap=max_gap)
mu_sim = mean(sim, weighted=weighted, max_gap=max_gap)
mu_obs = mean(obs, weighted=weighted, max_gap=max_gap)
beta = mu_sim / mu_obs
gamma = (std(sim, weighted=weighted, max_gap=max_gap) / mu_sim) / \
(std(obs, weighted=weighted, max_gap=max_gap) / mu_obs)
kge = 1 - sqrt((r - 1) ** 2 + (beta - 1) ** 2 + (gamma - 1) ** 2)
return kge | 974735f9deb8ffedf88711af5c059ac0aae90218 | 11,426 |
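
The `kge_2012` snippet above relies on pastas-style helpers (`pearsonr`, `mean`, `std`, `logger`) that are not part of this row. Below is a minimal, self-contained sketch of the unweighted 2012 KGE using only numpy and pandas; the helper name and sample series are illustrative assumptions.

```python
import numpy as np
import pandas as pd

def kge_2012_unweighted(obs: pd.Series, sim: pd.Series) -> float:
    """Minimal, unweighted KGE (2012) on the shared index of obs and sim."""
    obs = obs.dropna()
    sim = sim.reindex(obs.index).dropna()
    obs = obs.reindex(sim.index)
    r = np.corrcoef(obs.values, sim.values)[0, 1]                 # Pearson correlation
    beta = sim.mean() / obs.mean()                                # bias ratio
    gamma = (sim.std() / sim.mean()) / (obs.std() / obs.mean())   # variability ratio
    return 1 - np.sqrt((r - 1) ** 2 + (beta - 1) ** 2 + (gamma - 1) ** 2)

idx = pd.date_range("2020-01-01", periods=5)
obs = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0], index=idx)
sim = pd.Series([1.1, 1.9, 3.2, 3.8, 5.1], index=idx)
print(kge_2012_unweighted(obs, sim))  # close to 1.0 for a good fit
```
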
def hms_to_dms(h, m, s):
"""
    Convert hours, minutes, seconds to a ``(degree, arcminute, arcsecond)``
    tuple.
"""
return degrees_to_dms(hms_to_degrees(h, m, s)) | b99b051699f8a5395fe24e2e909f1690c0e67e4c | 11,427 |
def getChiv6ch(mol):
"""
    Calculate Chiv6ch, related to rings of size 6.
"""
return getChivnch(mol, 6) | 31a688b1d0b98b75d81b9c4c5e93bb8d62ee732e | 11,429 |
def is_outside_of_range(type_key: CLTypeKey, value: int) -> bool:
"""Returns flag indicating whether a value is outside of numeric range associated with the CL type.
"""
constraints = NUMERIC_CONSTRAINTS[type_key]
return value < constraints.MIN or value > constraints.MAX | fb8ec41d7edc094242df6bad13b8f32285a86007 | 11,430 |
import numpy as np
def read_skel(dset, path):
    """
    :param dset: name of dataset, one of 'ntu-rgbd', 'pku-mmd' or 'cad-60'
    :param path: path to the skeleton file
    :return: list with one entry per frame, each a numpy array of shape
        (num_skeletons, num_joints, 3) holding 3D joint positions
    """
if dset == 'ntu-rgbd':
file = open(path, 'r')
lines = file.readlines()
num_lines = len(lines)
num_frames = int(lines[0])
# print(num_lines, num_frames)
line_id = 1
data = []
for i in range(num_frames):
num_skels = int(lines[line_id])
# print(num_skels)
joints = []
for _ in range(num_skels):
num_joints = int(lines[line_id+2])
# print(num_joints)
joint = []
for k in range(num_joints):
tmp = lines[line_id+3+k].rstrip().split(' ')
x_3d, y_3d, z_3d, x_depth, y_depth, x_rgb, y_rgb, orientation_w,\
orientation_x, orientation_y, orientation_z = list(
map(float, tmp[:-1]))
joint.append([x_3d, y_3d, z_3d])
joints.append(joint)
line_id += 2+num_joints
joints = np.array(joints)
data.append(joints)
line_id += 1
assert line_id == num_lines
elif dset == 'pku-mmd':
file = open(path, 'r')
lines = file.readlines()
# num_lines = len(lines)
data = []
for line in lines:
joints = list(map(float, line.rstrip().split(' ')))
joints = np.array(joints).reshape(2, -1, 3)
if not np.any(joints[1]):
joints = joints[0][np.newaxis, :, :]
data.append(joints)
elif dset == 'cad-60':
f = open(path, 'r')
lines = f.readlines()
data = []
# Last line is "END"
for line in lines[:-1]:
            # first item is the frame number, the last item is empty
row = line.split(',')[1:-1]
row = list(map(float, row))
joints = []
for i in range(15):
if i < 11:
# First 11 joints
index = 14 * i + 10
else:
# Joint 12 ~ 15
index = 11 * 14 + (i - 11) * 4
joint = row[index: index+3]
joints.append(joint)
joints = np.array(joints) / 1000.0 # millimeter to meter
joints = joints[np.newaxis, :, :] # To match ntu-rgb format
data.append(joints)
else:
raise NotImplementedError
return data | f461ecb30ec1e7b66ce5d162ccc21b3ba34e6be8 | 11,431 |
def humanize_date(date_string):
""" returns dates as in this form: 'August 24 2019' """
return convert_date(date_string).strftime("%B %d %Y") | d9656af2c4091219d6ee259b557caadbde2cc393 | 11,432 |
def tree_sanity_check(tree: Node) -> bool:
"""
Sanity check for syntax trees: One and the same node must never appear
    twice in the syntax tree. Frozen Nodes (EMPTY_NODE, PLACEHOLDER)
should only exist temporarily and must have been dropped or eliminated
before any kind of tree generation (i.e. parsing) or transformation
is finished.
:param tree: the root of the tree to be checked
:returns: `True`, if the tree is "sane", `False` otherwise.
"""
node_set = set() # type: Set[Node]
for node in tree.select_if(lambda nd: True, include_root=True):
if not isinstance(node, Node) or node in node_set or isinstance(node, FrozenNode):
return False
node_set.add(node)
return True | 03b61729c67859bbf9820489ef8fc9768ea59f9f | 11,435 |
import numpy as np
def compute_std_error(g,theta,W,Omega,Nobs,Nsim=1.0e+10,step=1.0e-5,args=()):
    """ calculate standard errors from minimum-distance type estimation
    g should return a vector with:
    data moments - simulated moments as a function of theta
    Args:
        g (callable): moment function (returns a vector of length J)
        theta (np.ndarray): parameter vector (length K)
        W (np.ndarray): weighting matrix (dim J-by-J)
        Omega (np.ndarray): covariance matrix of empirical moments (dim J-by-J)
        Nobs (scalar): number of observations
        Nsim (scalar,optional): number of simulations
        step (scalar,optional): finite step in numerical gradients
        args (tuple,optional): additional arguments passed to g
    Returns:
        np.ndarray: asymptotic standard errors of theta
    """
# a. dimensions
K = len(theta)
J = len(W[0])
# b. numerical gradient.
grad = np.empty((J,K))
for p in range(K):
theta_now = theta.copy()
step_now = np.zeros(K)
step_now[p] = np.fmax(step,step*np.abs(theta_now[p]))
g_forward = g(theta_now + step_now,*args)
g_backward = g(theta_now - step_now,*args)
grad[:,p] = (g_forward - g_backward)/(2.0*step_now[p])
# c. asymptotic variance
GW = grad.T @ W
GWG = GW @ grad
Avar = np.linalg.inv(GWG) @ ( GW @ Omega @ GW.T ) @ np.linalg.inv(GWG)
# d. return asymptotic standard errors
fac = (1.0 + 1.0/Nsim)/Nobs
std_error = np.sqrt(fac*np.diag(Avar))
return std_error | 176710e3b6c18efc535c67e257bc1014b4862135 | 11,437 |
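
A small worked example for `compute_std_error`, assuming numpy is imported as in the snippet above. The moment condition estimates a sample mean, so the resulting standard error should match the familiar `std / sqrt(N)`; the data and weighting choices are illustrative.

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(loc=2.0, scale=1.0, size=500)

def g(theta):
    # data moment minus simulated moment: here simply x_bar - theta
    return np.array([x.mean() - theta[0]])

theta_hat = np.array([x.mean()])
W = np.eye(1)                      # weighting matrix (1 moment, 1 parameter)
Omega = np.array([[x.var()]])      # covariance of the empirical moment

se = compute_std_error(g, theta_hat, W, Omega, Nobs=x.size)
print(se)  # approximately x.std() / sqrt(len(x))
```
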
def lhs(paramList, trials, corrMat=None, columns=None, skip=None):
"""
Produce an ndarray or DataFrame of 'trials' rows of values for the given parameter
list, respecting the correlation matrix 'corrMat' if one is specified, using Latin
Hypercube (stratified) sampling.
The values in the i'th column are drawn from the ppf function of the i'th parameter
from paramList, and each columns i and j are rank correlated according to corrMat[i,j].
:param paramList: (list of rv-like objects representing parameters) Only requirement
on parameter objects is that they must implement the ppf function.
:param trials: (int) number of trials to generate for each parameter.
:param corrMat: a numpy matrix representing the correlation between the parameters.
corrMat[i,j] should give the correlation between the i'th and j'th
entries of paramlist.
:param columns: (None or list(str)) Column names to use to return a DataFrame.
    :param skip: (list of params) Parameters to process later because they are
dependent on other parameter values (e.g., they're "linked"). These
cannot be correlated.
:return: ndarray or DataFrame with `trials` rows of values for the `paramList`.
"""
ranks = genRankValues(len(paramList), trials, corrMat) if corrMat is not None else None
samples = np.zeros((trials, len(paramList))) # @UndefinedVariable
skip = skip or []
for i, param in enumerate(paramList):
if param in skip:
continue # process later
values = param.ppf(getPercentiles(trials)) # extract values from the RV for these percentiles
if corrMat is None:
# Sequence is a special case for which we don't shuffle (and we ignore stratified sampling)
if param.param.dataSrc.distroName != 'sequence':
np.random.shuffle(values) # randomize the stratified samples
else:
indices = ranks[:, i] - 1 # make them 0-relative
values = values[indices] # reorder to respect correlations
samples[:, i] = values
return DataFrame(samples, columns=columns) if columns else samples | 787042db9773f4da6a2dbb4552269ac2740fb02e | 11,438 |
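
The `lhs` function above depends on helpers (`genRankValues`, `getPercentiles`) and rank-correlation handling that are not shown in this row. The sketch below is a simplified, uncorrelated Latin Hypercube sampler built on scipy frozen distributions; it only illustrates the stratify-then-shuffle idea, not the full correlated implementation.

```python
import numpy as np
from scipy import stats

def simple_lhs(rvs, trials, seed=None):
    """Uncorrelated Latin Hypercube sampling: one stratified, shuffled column per RV."""
    rng = np.random.default_rng(seed)
    percentiles = (np.arange(trials) + 0.5) / trials   # midpoints of equal-probability strata
    samples = np.empty((trials, len(rvs)))
    for i, rv in enumerate(rvs):
        values = rv.ppf(percentiles)   # one value per stratum
        rng.shuffle(values)            # randomize stratum order
        samples[:, i] = values
    return samples

draws = simple_lhs([stats.norm(0, 1), stats.uniform(0, 10)], trials=5, seed=42)
print(draws)  # 5 rows, one column per distribution
```
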
def percentError(mean, sd, y_output, logits):
""" Calculates the percent error between the prediction and real value.
The percent error is calculated with the formula:
100*(|real - predicted|)/(real)
    The real and predicted values are un-normalized to see how accurate the true
predictions are. This metric is created in the name scope "percentError".
Input:
* mean: The mean of the original output distribution
* sd: The standard deviation of the original output distribution
* y_output: The y_output symbolic output from the iterator
        * logits: The symbolic prediction output from the neural network
Returns:
* percentErr: An operation which calculates the percent error when
* used in a training or validation run of the network
"""
with tf.name_scope("percentError", values=[y_output, logits]):
predictions= tf.exp(tf.reduce_sum(logits, axis=-1)*sd + mean)
actualValue = tf.exp(y_output*sd + mean)
percentErr = tf.reduce_mean(abs((actualValue-predictions)*100/(actualValue)))
tf.summary.scalar('Percent_Error', percentErr)
return(percentErr) | 017291a71388ccf2ecb8db6808965b164dfbfe3d | 11,439 |
def parse_multiple_files(*actions_files):
"""Parses multiple files. Broadly speaking, it parses sequentially all
files, and concatenates all answers.
"""
return parsing_utils.parse_multiple_files(parse_actions, *actions_files) | 186a984d91d04ae82f79e7d22f24bd834b8a0366 | 11,441 |
def frequent_combinations(spark_df: DataFrame, n=10, export=True):
"""
    takes a dataframe containing visitor logs and computes the n most frequent visitor-visitee pairs
    :param spark_df: Spark DataFrame
    :param n: number of top visitor-visitee pairs to return
    :param export: if True, persist the result as a CSV file
    :return: pandas dataframe with visitor-visitee pairs and their visit counts
"""
# compute aggregate and convert to pandas for visualization
freq_pairs = spark_df.groupBy(['VISITOR_NAME', 'VISITEE_NAME']).agg( \
count('APPT_START_DATE').alias('Visits')
). \
orderBy('Visits', ascending=False). \
limit(n). \
toPandas()
print(freq_pairs)
# persist
if export:
freq_pairs.to_csv(catalog['business/frequent_pairs'], index=False)
return freq_pairs | 5a4ce5e8199acd1462414fa2de95d4af48923434 | 11,442 |
import random
def reservoir_sampling(items, k):
"""
Reservoir sampling algorithm for large sample space or unknow end list
See <http://en.wikipedia.org/wiki/Reservoir_sampling> for detail>
Type: ([a] * Int) -> [a]
Prev constrain: k is positive and items at least of k items
Post constrain: the length of return array is k
"""
sample = items[0:k]
for i in range(k, len(items)):
j = random.randrange(1, i + 1)
if j <= k:
sample[j] = items[i]
return sample | ab2d0dc2bb3cb399ae7e6889f028503d165fbbe4 | 11,443 |
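
A quick usage check for the corrected `reservoir_sampling` above; the seed and input list are illustrative only.

```python
random.seed(0)
sample = reservoir_sampling(list(range(100)), 5)
print(sample)        # 5 items drawn (approximately) uniformly from 0..99
print(len(sample))   # 5
```
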
import json
def create_db_engine(app: Flask) -> Engine:
"""Create and return an engine instance based on the app's database configuration."""
url = URL(
drivername=app.config['DATABASE_DRIVER'],
username=app.config['DATABASE_USER'],
password=app.config['DATABASE_PASSWORD'],
host=app.config['DATABASE_HOST'],
port=app.config['DATABASE_PORT'],
database=app.config['DATABASE_DB']
)
return create_engine(
url,
json_serializer=lambda obj: json.dumps(obj, default=json_serialize_default)
) | 5a67f294b58e699345f517ae07c80851ae30eca9 | 11,444 |
import math
def wgs84_to_gcj02(lat, lng):
"""
WGS84转GCJ02(火星坐标系)
:param lng:WGS84坐标系的经度
:param lat:WGS84坐标系的纬度
:return:
"""
dlat = _transformlat(lng - 105.0, lat - 35.0)
dlng = _transformlng(lng - 105.0, lat - 35.0)
radlat = lat / 180.0 * pi
magic = math.sin(radlat)
magic = 1 - ee * magic * magic
sqrtmagic = math.sqrt(magic)
dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
dlng = (dlng * 180.0) / (a / sqrtmagic * math.cos(radlat) * pi)
mglat = lat + dlat
mglng = lng + dlng
return [mglat, mglng] | 64f2d8a088a159c5751838ba1fc00824bcc3e91e | 11,445 |
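
`wgs84_to_gcj02` references module-level names (`pi`, `a`, `ee`, `_transformlat`, `_transformlng`) that are not included in this snippet. The constants below are the values commonly used in GCJ-02 converters and are shown only as an assumption about what the missing module globals look like; the `_transform*` polynomial correction helpers are still required and are not reproduced here.

```python
# Assumed module-level constants for wgs84_to_gcj02 (standard GCJ-02 parameters).
pi = 3.1415926535897932384626   # pi as commonly hard-coded in these converters
a = 6378245.0                   # Krasovsky 1940 ellipsoid semi-major axis (metres)
ee = 0.00669342162296594323     # first eccentricity squared
```
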
def _normalize_kwargs(kwargs, kind='patch'):
"""Convert matplotlib keywords from short to long form."""
# Source:
# github.com/tritemio/FRETBursts/blob/fit_experim/fretbursts/burst_plot.py
if kind == 'line2d':
long_names = dict(c='color', ls='linestyle', lw='linewidth',
mec='markeredgecolor', mew='markeredgewidth',
mfc='markerfacecolor', ms='markersize',)
elif kind == 'patch':
long_names = dict(c='color', ls='linestyle', lw='linewidth',
ec='edgecolor', fc='facecolor',)
for short_name in long_names:
if short_name in kwargs:
kwargs[long_names[short_name]] = kwargs.pop(short_name)
return kwargs | 829f4dfd449064f4c1fc92aa8e481364eb997973 | 11,446 |
def fprime_to_jsonable(obj):
"""
Takes an F prime object and converts it to a jsonable type.
:param obj: object to convert
:return: object in jsonable format (can call json.dump(obj))
"""
# Otherwise try and scrape all "get_" getters in a smart way
anonymous = {}
getters = [attr for attr in dir(obj) if attr.startswith("get_")]
for getter in getters:
# Call the get_ functions, and call all non-static methods
try:
func = getattr(obj, getter)
item = func()
# If there is a property named "args" it needs to be handled specifically unless an incoming command
if (
getter == "get_args"
and not "fprime_gds.common.data_types.cmd_data.CmdData"
in str(type(obj))
):
args = []
for arg_spec in item:
arg_dict = {
"name": arg_spec[0],
"description": arg_spec[1],
"value": arg_spec[2].val,
"type": str(arg_spec[2]),
}
if arg_dict["type"] == "Enum":
arg_dict["possible"] = arg_spec[2].keys()
args.append(arg_dict)
# Fill in our special handling
item = args
anonymous[getter.replace("get_", "")] = item
except TypeError:
continue
return anonymous | 899674167b51cd752c7a8aaa9979856218759022 | 11,447 |
import numpy as np
import torch
from torch import Tensor
def subsequent_mask(size: int) -> Tensor:
"""
Mask out subsequent positions (to prevent attending to future positions)
Transformer helper function.
:param size: size of mask (2nd and 3rd dim)
:return: Tensor with 0s and 1s of shape (1, size, size)
"""
mask = np.triu(np.ones((1, size, size)), k=1).astype("uint8")
return torch.from_numpy(mask) == 0 | e065c32164d5250215c846aef39d510f6a93f0cd | 11,448 |
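
A usage sketch for `subsequent_mask`, assuming numpy and torch are imported as in the snippet above.

```python
print(subsequent_mask(3))
# tensor([[[ True, False, False],
#          [ True,  True, False],
#          [ True,  True,  True]]])
```
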
def process_entries(components):
"""Process top-level entries."""
data = {}
for index, value in enumerate(STRUCTURE):
label = value[0]
mandatory = value[1]
# Raise error if mandatory elements are missing
if index >= len(components):
if mandatory is True:
raise ValueError('UNH header is missing mandatory entry for {label}'.format(label=label))
else:
break
# Process
if len(value) == LENGTH_OF_A_SIMPLE_ENTRY:
data[label] = components[index]
elif len(value) == LENGTH_OF_A_NESTED_ENTRY:
data[label] = process_subentries(components, index)
else:
raise ValueError('unexpected structure')
return data | 344b9aa601b71fd9352fdb412d9dfa7492312d1a | 11,449 |
from typing import Tuple
def normalize_input_vector(trainX: np.ndarray, testX: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize the input vector
Args:
trainX (np.ndarray): train embedding array.
testX (np.ndarray): test embedding array.
Returns:
np.ndarray, np.ndarray: normalized train and test arrays.
"""
in_encoder = Normalizer(norm='l2')
trainX = in_encoder.transform(trainX)
testX = in_encoder.transform(testX)
return trainX, testX | 34324541e7db302bd46d41f70bd7fdedb6055ae6 | 11,450 |
def update_cache(cache_data, new_data, key):
"""
Add newly collected data to the pre-existing cache data
Args:
cache_data (dict): Pre-existing chip data
new_data (dict): Newly acquired chip data
key (str): The chip UL coordinates
    Returns:
        dict: The updated cache data
    """
if key in cache_data.keys():
cache_data[key].update(new_data[key])
else:
cache_data[key] = new_data[key]
return cache_data | f439f34d1e95ccd69dc10d5f8c06ca20fc869b1e | 11,451 |
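
A usage sketch for `update_cache`; the chip keys and values below are purely illustrative.

```python
cache = {"h004v002": {"2006-07-09": "a"}}
new = {"h004v002": {"2006-08-10": "b"}, "h005v002": {"2006-07-09": "c"}}

cache = update_cache(cache, new, "h004v002")  # merges into the existing chip entry
cache = update_cache(cache, new, "h005v002")  # adds a brand-new chip entry
print(cache)
# {'h004v002': {'2006-07-09': 'a', '2006-08-10': 'b'}, 'h005v002': {'2006-07-09': 'c'}}
```
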
def status(command, **keys):
"""Run a subprogram capturing it's output and return the exit status."""
return _captured_output(command, **keys).status | f2bb97448a812548dfbdea770db9a43d8c46301a | 11,452 |
def normalize(flow: Tensor) -> Tensor:
"""Re-scales the optical flow vectors such that they correspond to motion on the normalized pixel coordinates
in the range [-1, 1] x [-1, 1].
Args:
flow: the optical flow tensor of shape (B, 2, H, W)
Returns:
The optical flow tensor with flow vectors rescaled to the normalized pixel coordinate system.
"""
# flow: (B, 2, H, W)
assert flow.size(1) == 2
h, w = flow.shape[-2:]
return scale(flow, (2.0 / max(w - 1, 1), 2.0 / max(h - 1, 1))) | 9164686650e0728ba1d99b65e5757b0e12d6c934 | 11,453 |
def to_graph6_bytes(G, nodes=None, header=True):
"""Convert a simple undirected graph to bytes in graph6 format.
Parameters
----------
G : Graph (undirected)
nodes: list or iterable
Nodes are labeled 0...n-1 in the order provided. If None the ordering
given by ``G.nodes()`` is used.
header: bool
If True add '>>graph6<<' bytes to head of data.
Raises
------
NetworkXNotImplemented
If the graph is directed or is a multigraph.
ValueError
If the graph has at least ``2 ** 36`` nodes; the graph6 format
is only defined for graphs of order less than ``2 ** 36``.
Examples
--------
>>> nx.to_graph6_bytes(nx.path_graph(2))
b'>>graph6<<A_\\n'
See Also
--------
from_graph6_bytes, read_graph6, write_graph6_bytes
Notes
-----
The returned bytes end with a newline character.
The format does not support edge or node labels, parallel edges or
self loops. If self loops are present they are silently ignored.
References
----------
.. [1] Graph6 specification
<http://users.cecs.anu.edu.au/~bdm/data/formats.html>
"""
if nodes is not None:
G = G.subgraph(nodes)
H = nx.convert_node_labels_to_integers(G)
nodes = sorted(H.nodes())
return b"".join(_generate_graph6_bytes(H, nodes, header)) | 05617e6ebe6d4a374bfa125e3b5afb1bca3304c1 | 11,454 |
def get_arrays_from_img_label(img, label, img_mode=None):
"""Transform a SimpleITK image and label map into numpy arrays, and
optionally select a channel.
Parameters:
img (SimpleITK.SimpleITK.Image): image
label (SimpleITK.SimpleITK.Image): label map
img_mode (int or None): optional mode channel, so output is 3D
Returns:
(numpy.ndarray, numpy.ndarray): image and label in numpy format
"""
img_np = sitk.GetArrayFromImage(img)
if img_mode is not None:
img_np = img_np[img_mode]
label_np = sitk.GetArrayFromImage(label)
return img_np, label_np.astype(int) | 902cd2cd5f31121e4a57a335a37b42b4caeafb4a | 11,455 |
def _get_error_code(exception):
"""Get the most specific error code for the exception via superclass"""
    for exception_class in exception.mro():
        try:
            return error_codes[exception_class]
except KeyError:
continue | c4c9a2ec2f5cf510b6e9a7f6058287e4faf7b5b4 | 11,456 |
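
A sketch of how `_get_error_code` resolves codes via the exception MRO. The `error_codes` mapping is defined elsewhere in the original module, so the one below is hypothetical; note that the function expects an exception class, since it calls `.mro()`.

```python
# Hypothetical error_codes mapping; the real one lives elsewhere in the module.
error_codes = {ValueError: 400, Exception: 500}

class CustomValidationError(ValueError):
    pass

print(_get_error_code(CustomValidationError))  # 400, found via the ValueError superclass
print(_get_error_code(KeyError))               # 500, falls back to Exception
```
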
def render_pep440_branch(pieces):
"""TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .
The ".dev0" means not master branch. Note that .dev0 sorts backwards
(a feature branch will appear "older" than the master branch).
Exceptions:
1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0"
if pieces["branch"] != "master":
rendered += ".dev0"
rendered += "+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered | 0f1c1207016695ee1440fea214f8f92f8c6398ac | 11,457 |
import json
def read_json_file(file_name: str, encoding: str = "utf-8") -> dict:
"""Reads a json file
:param file_name: path
:param encoding: encoding to use
:return: dict content
"""
with open(file_name, "r", encoding=encoding) as json_file:
return json.load(json_file) | 313aee72b06303dfffd8a2e9f3641d1346329a91 | 11,458 |
def pretty_print_row(col_full_widths, row, max_field_size):
"""
pretty print a row such that each column is padded to have the widths in the col_full_widths vector
"""
start = "| "
if len(row) == len(col_full_widths):
end = " |"
else:
end = "|"
return start + "|".join(pretty_print_field(full_width, field, max_field_size) for full_width, field in zip(col_full_widths, row)) + end | c94807e4de18e4454e0263e25f4103cd914df2cd | 11,459 |
def _get_data_for_agg(new_svarcube, new_tacube):
"""Reshape data for use in iris aggregator based on two cubes."""
dims_to_collapse = set()
dims_to_collapse.update(new_svarcube.coord_dims('air_pressure'))
untouched_dims = set(range(new_svarcube.ndim)) -\
set(dims_to_collapse)
dims = list(untouched_dims) + list(dims_to_collapse)
unrolled_data = np.moveaxis(new_tacube.data, dims,
range(new_svarcube.ndim))
return unrolled_data | 51c2683e3477528809fcf229c94125020cdfee6d | 11,460 |
def refs_changed_by_other_cc(current_user):
"""
Return dictionary with id of reference and log object changed by other cooperative centers
"""
current_user_cc = current_user.profile.get_attribute('cc')
result_list = defaultdict(list)
# get last references of current user cooperative center
refs_from_cc = Reference.objects.filter(cooperative_center_code=current_user_cc).order_by('-id')[:100]
for reference in refs_from_cc:
        # get correct class (source or analytic)
c_type = reference.get_content_type_id
# filter by logs of current reference, change type and made by other users
log_list = LogEntry.objects.filter(object_id=reference.id, content_type=c_type, action_flag=2) \
.exclude(user=current_user).order_by('-id')
if log_list:
# exclude from list all changes that was already reviewed (logreview is created)
log_list = log_list.exclude(logreview__isnull=False)
# create list of log users of same cc
exclude_user_list = []
for log in log_list:
log_user_cc = log.user.profile.get_attribute('cc')
if log_user_cc == current_user_cc:
exclude_user_list.append(log.user)
# exclude from log list users from same cc as current user
if exclude_user_list:
log_list = log_list.exclude(user__in=exclude_user_list)
if log_list:
# group result by id (one line for each reference)
for log in log_list:
result_list[log.object_id] = log
return result_list | aa2012f1efe6eeb796e3871af691b685e3388e67 | 11,461 |
from typing import Dict
def chain_head(head: int, child: int, heads: Dict[int, int]):
"""
>>> chain_head(0, 2, {1: 2, 2: 3, 3: 0})
True
>>> chain_head(2, 0, {1: 2, 2: 3, 3: 0})
False
"""
curr_child = child
while curr_child != -1:
if curr_child == head:
return True
curr_child = heads.get(curr_child, -1)
return False | d786d3dbbdc496a1a7515d9df04fa2a09968b87d | 11,462 |
def UpgradeFile(file_proto):
"""In-place upgrade a FileDescriptorProto from v2[alpha\d] to v3alpha.
Args:
file_proto: v2[alpha\d] FileDescriptorProto message.
"""
# Upgrade package.
file_proto.package = UpgradedType(file_proto.package)
# Upgrade imports.
for n, d in enumerate(file_proto.dependency):
file_proto.dependency[n] = UpgradedPath(d)
# Upgrade comments.
for location in file_proto.source_code_info.location:
location.leading_comments = UpgradedComment(location.leading_comments)
location.trailing_comments = UpgradedComment(location.trailing_comments)
for n, c in enumerate(location.leading_detached_comments):
location.leading_detached_comments[n] = UpgradedComment(c)
# Upgrade services.
for s in file_proto.service:
UpgradeService(s)
# Upgrade messages.
for m in file_proto.message_type:
UpgradeMessage(m)
for e in file_proto.enum_type:
UpgradeEnum(e)
return file_proto | 942646c67bb987757449fbb16a6164008957cf99 | 11,463 |
import ipaddress
import logging
def _get_ip_block(ip_block_str):
""" Convert string into ipaddress.ip_network. Support both IPv4 or IPv6
addresses.
Args:
ip_block_str(string): network address, e.g. "192.168.0.0/24".
Returns:
ip_block(ipaddress.ip_network)
"""
try:
ip_block = ipaddress.ip_network(ip_block_str)
except ValueError:
logging.error("Invalid IP block format: %s", ip_block_str)
return None
return ip_block | b887c615091926ed7ebbbef8870e247348e2aa27 | 11,464 |
def mul_ntt(f_ntt, g_ntt, q):
"""Multiplication of two polynomials (coefficient representation)."""
assert len(f_ntt) == len(g_ntt)
deg = len(f_ntt)
return [(f_ntt[i] * g_ntt[i]) % q for i in range(deg)] | 504838bb812792b6bb83b1d485e4fb3221dec36e | 11,465 |
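
A small usage example for `mul_ntt`: pointwise products reduced modulo q. The modulus below is just an example value (the prime used in Falcon-style NTTs); any modulus consistent with the NTT domain works.

```python
q = 12289  # example modulus (the prime used in Falcon-style NTTs)
print(mul_ntt([3, 5, 7, 11], [2, 4, 6, 8], q))  # [6, 20, 42, 88]
```
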
def _expected_datatypes(product_type):
"""
Aux function. Contains the most current lists of keys we expect there to be in the different forms of metadata.
"""
if product_type == "SLC":
# Only the datetimes need to be parsed.
expected_dtypes = {
"acquisition_start_utc": "parse_datetime_single",
"acquisition_end_utc": "parse_datetime_single",
"dc_estimate_time_utc": "parse_datetime_single",
"first_pixel_time_utc": "parse_datetime_single",
"state_vector_time_utc": "parse_datetime_vect",
"zerodoppler_start_utc": "parse_datetime_single",
"zerodoppler_end_utc": "parse_datetime_single",
}
elif product_type == "GRD":
# All the fields need to be parsed, so all the datatypes are input.
expected_dtypes = {
"acquisition_end_utc": "parse_datetime_single", # single datetime
"acquisition_mode": str,
"acquisition_prf": float,
"acquisition_start_utc": str,
"ant_elev_corr_flag": bool,
"area_or_point": str,
"avg_scene_height": float,
"azimuth_spacing": float,
"azimuth_look_bandwidth": float,
"azimuth_look_overlap": float,
"azimuth_looks": int,
"azimuth_time_interval": float,
"calibration_factor": float,
"carrier_frequency": float,
"chirp_bandwidth": float,
"chirp_duration": float,
"coord_center": "parse_float_vect", # 1d vect of floats, needs to be parsed
"coord_first_far": "parse_float_vect",
"coord_first_near": "parse_float_vect",
"coord_last_far": "parse_float_vect",
"coord_last_near": "parse_float_vect",
"dc_estimate_coeffs": "parse_float_vect",
"dc_estimate_poly_order": int,
"dc_estimate_time_utc": "parse_datetime_vect", # datetime vector
"dc_reference_pixel_time": float,
"doppler_rate_coeffs": "parse_float_vect",
"doppler_rate_poly_order": int,
"doppler_rate_reference_pixel_time": float,
"gcp_terrain_model": str,
"geo_ref_system": str,
"grsr_coefficients": "parse_float_vect",
"grsr_ground_range_origin": float,
"grsr_poly_order": int,
"grsr_zero_doppler_time": "parse_datetime_single", # single datetime
"heading": float,
"incidence_angle_coefficients": "parse_float_vect",
"incidence_angle_ground_range_origin": float,
"incidence_angle_poly_order": int,
"incidence_angle_zero_doppler_time": "parse_datetime_single", # single datetime
"incidence_center": float,
"incidence_far": float,
"incidence_near": float,
"look_side": str,
"mean_earth_radius": float,
"mean_orbit_altitude": float,
"number_of_azimuth_samples": int,
"number_of_dc_estimations": int,
"number_of_range_samples": int,
"number_of_state_vectors": int,
"orbit_absolute_number": int,
"orbit_direction": str,
"orbit_processing_level": str,
"orbit_relative_number": int,
"orbit_repeat_cycle": int,
"polarization": str,
"posX": "parse_float_vect",
"posY": "parse_float_vect",
"posZ": "parse_float_vect",
"processing_prf": float,
"processing_time": "parse_datetime_single", # single datetime
"processor_version": str,
"product_file": str,
"product_level": str,
"product_name": str,
"product_type": str,
"range_looks": int,
"range_sampling_rate": float,
"range_spacing": float,
"range_spread_comp_flag": bool,
"sample_precision": str,
"satellite_look_angle": str,
"satellite_name": str,
"slant_range_to_first_pixel": float,
"state_vector_time_utc": "parse_datetime_vect", # 1d vect of datetimes, need to be parsed.
"total_processed_bandwidth_azimuth": float,
"velX": "parse_float_vect",
"velY": "parse_float_vect",
"velZ": "parse_float_vect",
"window_function_azimuth": str,
"window_function_range": str,
"zerodoppler_end_utc": "parse_datetime_single", # single datetime
"zerodoppler_start_utc": "parse_datetime_single", # single datetime
}
elif product_type == "xml":
raise NotImplementedError
elif not isinstance(product_type, str):
raise TypeError(
'Did not understand input "product_type", a str was expected but a %s datatype variable was input.'
% type(product_type)
)
else:
raise ValueError(
'Did not understand input "product_type", either "SLC", "GRD" or "xml" was expected but %s was input.'
% product_type
)
return expected_dtypes | ea5a2d78bc5693259955e60847de7a663dcdbf2c | 11,467 |
from typing import List
import copy
def convert_vecs_to_var(
c_sys: CompositeSystem, vecs: List[np.ndarray], on_para_eq_constraint: bool = True
) -> np.ndarray:
"""converts hs of povm to vec of variables.
Parameters
----------
c_sys : CompositeSystem
        CompositeSystem of this POVM.
vecs : List[np.ndarray]
list of vec of povm elements.
on_para_eq_constraint : bool, optional
uses equal constraints, by default True.
Returns
-------
np.ndarray
list of vec of variables.
"""
var = copy.copy(vecs)
if on_para_eq_constraint:
del var[-1]
var = np.hstack(var)
return var | cea5d80a7213e12113b1bec425b142667f9bb36e | 11,468 |
def timetable_to_subrip(aligned_timetable):
"""
Converts the aligned timetable into the SubRip format.
Args:
aligned_timetable (list[dict]):
An aligned timetable that is output by the `Aligner` class.
Returns:
str:
Text representing a SubRip file.
"""
# Define a variable to contain the file's contents
file_contents = ""
# Process each block
for i, block in enumerate(aligned_timetable):
# Define a temporary variable to store this caption block
block_text = f"{i + 1}\n" # Every SubRip caption block starts with a number
# Get the start and end time of the block
start_time = timedelta_to_subrip_time(timedelta(seconds=block["start_time"]))
end_time = timedelta_to_subrip_time(timedelta(seconds=block["end_time"]))
# Add the timing line to the block of text
block_text += f"{start_time} --> {end_time}\n"
# Add the line of text from the `block` to the block of text
block_text += block["text"] + "\n\n"
# Add the `block_text` to the `file_contents`
file_contents += block_text
# Return the final file's contents
return file_contents | cddcf115ccb9441966d9c1a0a2b67ba25e00e6da | 11,470 |
import torch as T  # assumption: the original module aliased a tensor backend as T; torch is used here as a stand-in
def add(a: T.Tensor, b: T.Tensor) -> T.Tensor:
"""
Add tensor a to tensor b using broadcasting.
Args:
a: A tensor
b: A tensor
Returns:
tensor: a + b
"""
return a + b | a555de4341b874163c551fff4b7674af1e60ace2 | 11,471 |
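
A broadcasting example for `add`, assuming the tensor backend is torch as in the adjusted import above.

```python
import torch

a = torch.ones(2, 3)
b = torch.tensor([1.0, 2.0, 3.0])
print(add(a, b))
# tensor([[2., 3., 4.],
#         [2., 3., 4.]])
```
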
def integrate(que):
"""
    expand the first block in the queue into the grid cells it occupies (type 2 spans two columns, type 3 spans two rows)
@param que: init blocks
@type que: deque
@return: integrated block
@rtype: list
"""
blocks = []
t1, y, x = que.popleft()
blocks.append([y, x])
if t1 == 2:
blocks.append([y, x + 1])
elif t1 == 3:
blocks.append([y + 1, x])
return blocks | a91235f34e1151b6dd9c6c266658cca86b375278 | 11,472 |
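
A usage sketch for `integrate`; the block tuple below (type, row, column) is illustrative.

```python
from collections import deque

que = deque([(2, 1, 4)])   # a type-2 block at row 1, column 4
print(integrate(que))      # [[1, 4], [1, 5]] -- extends one cell to the right
```
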
def test_binary_query(cbcsdk_mock):
"""Testing Binary Querying"""
called = False
def post_validate(url, body, **kwargs):
nonlocal called
if not called:
called = True
assert body['expiration_seconds'] == 3600
else:
assert body['expiration_seconds'] == 10
return BINARY_GET_FILE_RESP
sha256 = "00a16c806ff694b64e566886bba5122655eff89b45226cddc8651df7860e4524"
cbcsdk_mock.mock_request("GET", f"/ubs/v1/orgs/test/sha256/{sha256}", BINARY_GET_METADATA_RESP)
api = cbcsdk_mock.api
binary = api.select(Binary, sha256)
assert isinstance(binary, Binary)
cbcsdk_mock.mock_request("GET", f"/ubs/v1/orgs/test/sha256/{sha256}/summary/device", BINARY_GET_DEVICE_SUMMARY_RESP)
summary = binary.summary
cbcsdk_mock.mock_request("POST", "/ubs/v1/orgs/test/file/_download", post_validate)
url = binary.download_url()
assert summary is not None
assert url is not None
url = binary.download_url(expiration_seconds=10)
assert url is not None | 5cfd7c7d1ab714b342e13c33cf896032f8387cde | 11,473 |
import typing
def parse_struct_encoding(struct_encoding: bytes) -> typing.Tuple[typing.Optional[bytes], typing.Sequence[bytes]]:
"""Parse an array type encoding into its name and field type encodings."""
if not struct_encoding.startswith(b"{"):
raise ValueError(f"Missing opening brace in struct type encoding: {struct_encoding!r}")
if not struct_encoding.endswith(b"}"):
raise ValueError(f"Missing closing brace in struct type encoding: {struct_encoding!r}")
try:
# Stop searching for the equals if an opening brace
# (i. e. the start of another structure type encoding)
# is reached.
# This is necessary to correctly handle struct types with no name that contain a struct type with a name,
# such as b"{{foo=ii}}" (an unnamed struct containing a struct named "foo" containing two integers).
try:
end = struct_encoding.index(b"{", 1)
except ValueError:
end = -1
equals_pos = struct_encoding.index(b"=", 1, end)
except ValueError:
name = None
field_type_encoding_string = struct_encoding[1:-1]
else:
name = struct_encoding[1:equals_pos]
field_type_encoding_string = struct_encoding[equals_pos+1:-1]
field_type_encodings = list(split_encodings(field_type_encoding_string))
return name, field_type_encodings | 47455b192049b976dce392b928932d8291d1d008 | 11,474 |
def _comp_point_coordinate(self):
"""Compute the point coordinates needed to plot the Slot.
Parameters
----------
self : Slot19
A Slot19 object
Returns
-------
point_list: list
A list of Points
"""
Rbo = self.get_Rbo()
# alpha is the angle to rotate Z0 so ||Z1,Z8|| = W0
alpha0 = self.comp_angle_opening() / 2
alpha1 = self.comp_angle_bottom() / 2
# comp point coordinate (in complex)
Z_ = Rbo * exp(1j * 0)
Z0 = Z_ * exp(1j * alpha0)
if self.is_outwards():
Z1 = (Rbo + self.H0) * exp(1j * alpha1)
else: # inward slot
Z1 = (Rbo - self.H0) * exp(1j * alpha1)
    # symmetry
Z2 = Z1.conjugate()
Z3 = Z0.conjugate()
return [Z3, Z2, Z1, Z0] | c74c28af57ea90f208ac61cd2433376e9c1a47ac | 11,475 |
import inspect
from functools import partial
def getargspec(func):
"""Like inspect.getargspec but supports functools.partial as well."""
if inspect.ismethod(func):
func = func.__func__
if type(func) is partial:
orig_func = func.func
argspec = getargspec(orig_func)
args = list(argspec[0])
defaults = list(argspec[3] or ())
kwoargs = list(argspec[4])
kwodefs = dict(argspec[5] or {})
if func.args:
args = args[len(func.args):]
for arg in func.keywords or ():
try:
i = args.index(arg) - len(args)
del args[i]
try:
del defaults[i]
except IndexError:
pass
except ValueError: # must be a kwonly arg
i = kwoargs.index(arg)
del kwoargs[i]
del kwodefs[arg]
return inspect.FullArgSpec(args, argspec[1], argspec[2],
tuple(defaults), kwoargs,
kwodefs, argspec[6])
while hasattr(func, '__wrapped__'):
func = func.__wrapped__
if not inspect.isfunction(func):
raise TypeError('%r is not a Python function' % func)
return inspect.getfullargspec(func) | df1745daaf7cad09d75937cce399d705ce10de2b | 11,476 |
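
A usage sketch for `getargspec` with `functools.partial`, assuming the import added to the snippet above; the sample function is illustrative.

```python
from functools import partial

def f(a, b, c=3):
    return a + b + c

spec = getargspec(partial(f, 1, c=5))
print(spec.args)      # ['b'] -- 'a' is bound positionally, 'c' by keyword
print(spec.defaults)  # ()
```
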
from typing import Optional
from typing import Union
from typing import Tuple
from typing import Dict
def empirical_kernel_fn(f: ApplyFn,
trace_axes: Axes = (-1,),
diagonal_axes: Axes = ()
) -> EmpiricalKernelFn:
"""Returns a function that computes single draws from NNGP and NT kernels.
Args:
f:
the function whose NTK we are computing. `f` should have the signature
`f(params, inputs[, rng])` and should return an `np.ndarray` outputs.
trace_axes:
output axes to trace the output kernel over, i.e. compute only the trace
of the covariance along the respective pair of axes (one pair for each
axis in `trace_axes`). This allows to save space and compute if you are
only interested in the respective trace, but also improve approximation
accuracy if you know that covariance along these pairs of axes converges
to a `constant * identity matrix` in the limit of interest (e.g.
infinite width or infinite `n_samples`). A common use case is the channel
/ feature / logit axis, since activation slices along such axis are i.i.d.
and the respective covariance along the respective pair of axes indeed
converges to a constant-diagonal matrix in the infinite width or infinite
`n_samples` limit.
Also related to "contracting dimensions" in XLA terms.
(https://www.tensorflow.org/xla/operation_semantics#dotgeneral)
diagonal_axes:
output axes to diagonalize the output kernel over, i.e. compute only the
diagonal of the covariance along the respective pair of axes (one pair for
each axis in `diagonal_axes`). This allows to save space and compute, if
off-diagonal values along these axes are not needed, but also improve
approximation accuracy if their limiting value is known theoretically,
e.g. if they vanish in the limit of interest (e.g. infinite
width or infinite `n_samples`). If you further know that on-diagonal
values converge to the same constant in your limit of interest, you should
specify these axes in `trace_axes` instead, to save even more compute and
gain even more accuracy. A common use case is computing the variance
(instead of covariance) along certain axes.
Also related to "batch dimensions" in XLA terms.
(https://www.tensorflow.org/xla/operation_semantics#dotgeneral)
Returns:
A function to draw a single sample the NNGP and NTK empirical kernels of a
given network `f`.
"""
kernel_fns = {
'nngp': empirical_nngp_fn(f, trace_axes, diagonal_axes),
'ntk': empirical_ntk_fn(f, trace_axes, diagonal_axes)
}
@utils.get_namedtuple('EmpiricalKernel')
def kernel_fn(x1: np.ndarray,
x2: Optional[np.ndarray],
get: Union[None, str, Tuple[str, ...]],
params: PyTree,
**apply_fn_kwargs) -> Dict[str, np.ndarray]:
"""Computes a single sample of the empirical kernel of type `get`.
Args:
x1:
first batch of inputs.
x2:
second batch of inputs. `x2=None` means `x2=x1`. `f(x2)` must have a
matching shape with `f(x1)` on `trace_axes` and `diagonal_axes`.
get:
type of the empirical kernel. `get=None` means `get=("nngp", "ntk")`.
Can be a string (`"nngp"`) or a tuple of strings (`("ntk", "nngp")`).
params:
A `PyTree` of parameters about which we would like to compute the
neural tangent kernel.
**apply_fn_kwargs:
keyword arguments passed to `apply_fn`. `apply_fn_kwargs` will be split
into `apply_fn_kwargs1` and `apply_fn_kwargs2` by the `_split_kwargs`
function which will be passed to `apply_fn`. In particular, the rng key
in `apply_fn_kwargs`, will be split into two different (if `x1!=x2`) or
same (if `x1==x2`) rng keys. See the `_read_key` function for more
details.
Returns:
A single sample of the empirical kernel. The shape is "almost"
`zip(f(x1).shape, f(x2).shape)` except for:
1) `trace_axes` are absent as they are contracted over.
2) `diagonal_axes` are present only once.
All other axes are present twice.
If `get` is a string, returns the requested `np.ndarray`. If `get` is a
tuple, returns an `EmpiricalKernel` namedtuple containing the
requested information.
"""
if get is None:
get = ('nngp', 'ntk')
return {g: kernel_fns[g](x1, x2, params, **apply_fn_kwargs)
for g in get} # pytype: disable=wrong-arg-count
return kernel_fn | 890de1ebdd5f41f5aa257cedf1f03325f11e707c | 11,477 |
def read_image_batch(image_paths, image_size=None, as_list=False):
"""
Reads image array of np.uint8 and shape (num_images, *image_shape)
* image_paths: list of image paths
* image_size: if not None, image is resized
* as_list: if True, return list of images,
else return np.ndarray (default)
:return: np.ndarray or list
"""
images = None
for i, image_path in enumerate(image_paths):
im = load_img(image_path)
if image_size is not None:
im = im.resize(image_size, Image.LANCZOS)
x = img_to_array(im).astype(np.uint8)
if images is None:
if not as_list:
images = np.zeros((len(image_paths),) + x.shape,
dtype=np.uint8)
else: images = []
if not as_list: images[i, ...] = x
else: images.append(x)
return images | 7ee4e01682c5175a6b22db5d48acdb76471d03da | 11,478 |
def dc_vm_backup(request, dc, hostname):
"""
Switch current datacenter and redirect to VM backup page.
"""
dc_switch(request, dc)
return redirect('vm_backup', hostname=hostname) | 168576ac2b3384c1e35a1f972b7362a9ba379582 | 11,479 |
import numpy as np
def compute_total_distance(path):
"""compute total sum of distance travelled from path list"""
path_array = np.diff(np.array(path), axis=0)
segment_distance = np.sqrt((path_array ** 2).sum(axis=1))
return np.sum(segment_distance) | c0c4d0303bdeaafdfda84beb65fd4e60a4ff7436 | 11,480 |
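
A quick check for `compute_total_distance`, assuming numpy is imported as in the snippet above.

```python
path = [(0.0, 0.0), (3.0, 4.0), (3.0, 5.0)]
print(compute_total_distance(path))  # 6.0: a 3-4-5 leg plus one unit step
```
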
def get_relative_positions_matrix(length_x, length_y, max_relative_position):
"""Generates matrix of relative positions between inputs."""
range_vec_x = tf.range(length_x)
range_vec_y = tf.range(length_y)
# shape: [length_x, length_y]
distance_mat = tf.expand_dims(range_vec_x, -1) - tf.expand_dims(range_vec_y, 0)
distance_mat_clipped = tf.clip_by_value(distance_mat, -max_relative_position,
max_relative_position)
# Shift values to be >= 0. Each integer still uniquely identifies a relative
# position difference.
final_mat = distance_mat_clipped + max_relative_position
return final_mat | 661cabfbcb3e8566dd8d9ec4e56a71a4d62091fd | 11,481 |
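
A NumPy-only sketch of the same computation as `get_relative_positions_matrix`, for length 3 and `max_relative_position=2`; it shows how clipped distances are shifted to non-negative ids.

```python
import numpy as np

range_x = np.arange(3)
distance = range_x[:, None] - range_x[None, :]   # i - j
final = np.clip(distance, -2, 2) + 2             # clip and shift to be >= 0
print(final)
# [[2 1 0]
#  [3 2 1]
#  [4 3 2]]
```
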
def func_split_item(k):
""" Computes the expected value and variance of the splitting item random variable S.
Computes the expression (26b) and (26c) in Theorem 8. Remember that r.v. S is the value of index s
such that $\sum_{i=1}^{s-1} w(i) \leq k$ and $\sum_{i=1}^s w(i) > k$.
Args:
k: Int. The capacity of the Knapsack Problem instance.
Returns:
s: float. The expected value of the splitting item random variable.
var_split: float. The variance of the splitting item random variable.
"""
b = 1 + 1 / k # Defining a cumbersome base
s = (1 + 1 / k) ** k # Computing the split item
var_split = (3 + 1 / k) * b ** (k - 1) - b ** (2 * k) # Computing the variance of the split item
return s, var_split | 84ec7f4d76ced51ebdbd28efdc252b5ff3809e79 | 11,482 |
def eq(equation: str) -> int:
"""Evaluate the equation."""
code = compile(equation, "<string>", "eval")
return eval(code) | 5e88cad8009dc3dcaf36b216fa217fbadfaa50b3 | 11,483 |
def is_client_in_data(hass: HomeAssistant, unique_id: str) -> bool:
"""Check if ZoneMinder client is in the Home Assistant data."""
prime_config_data(hass, unique_id)
return const.API_CLIENT in hass.data[const.DOMAIN][const.CONFIG_DATA][unique_id] | 740e74b2d77bcf29aba7d2548930a98ec508fec0 | 11,484 |
from datetime import datetime
def parse_date(datestr):
""" Given a date in xport format, return Python date. """
return datetime.strptime(datestr, "%d%b%y:%H:%M:%S") | b802a528418a24300aeba3e33e9df8a268f0a27b | 11,485 |
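
A usage example for `parse_date`; the sample string follows the XPORT datetime layout the format string expects.

```python
print(parse_date("16FEB11:10:07:55"))  # 2011-02-16 10:07:55
```
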
def generate_database(m, n, uni_range_low=None, uni_range_high=None, exact_number=False):
"""
- Generate Universe by picking n random integers from low (inclusive) to high (exclusive).
If exact_number, then Universe.size == n
- Generate a Database of m records, over the Universe
"""
# generate Universe
if exact_number:
objects = range(n)
else:
objects = list(np.random.randint(uni_range_low, uni_range_high, size=n))
uni = Universe(objects)
# generate Database
db = uni.random_db(m)
return db | a6fad1192d0c286f7fdb585933b5648f0ee9cb4c | 11,486 |
def interface_style():
    """Return current platform interface style (light or dark)."""
    try:  # currently only works on macOS
        from Foundation import NSUserDefaults as NSUD
    except ImportError:
        return None
style = NSUD.standardUserDefaults().stringForKey_("AppleInterfaceStyle")
if style == "Dark":
return "dark"
else:
return "light" | 5c30da34a3003ec52c3f97fb86dbf2ba73101a88 | 11,487 |
def get_num_forces(cgmodel):
"""
Given a CGModel() class object, this function determines how many forces we are including when evaluating the energy.
:param cgmodel: CGModel() class object
:type cgmodel: class
:returns:
- total_forces (int) - Number of forces in the coarse grained model
:Example:
>>> from foldamers.cg_model.cgmodel import CGModel
>>> cgmodel = CGModel()
>>> total_number_forces = get_num_forces(cgmodel)
"""
total_forces = 0
if cgmodel.include_bond_forces:
total_forces = total_forces + 1
if cgmodel.include_nonbonded_forces:
total_forces = total_forces + 1
if cgmodel.include_bond_angle_forces:
total_forces = total_forces + 1
if cgmodel.include_torsion_forces:
total_forces = total_forces + 1
return total_forces | 5f5b897f1b0def0b858ca82319f9eebfcf75454a | 11,488 |
def cybrowser_dialog(id=None, text=None, title=None, url=None, base_url=DEFAULT_BASE_URL):
"""Launch Cytoscape's internal web browser in a separate window
Provide an id for the window if you want subsequent control of the window e.g., via cybrowser hide.
Args:
id (str): The identifier for the new browser window
text (str): HTML text to initially load into the browser
title (str): Text to be shown in the title bar of the browser window
url (str): The URL the browser should load
base_url (str): Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://127.0.0.1:1234
and the latest version of the CyREST API supported by this version of py4cytoscape.
Returns:
dict: {'id': id} where ``id`` is the one provided as a parameter to this function
Raises:
requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error
Examples:
>>> cybrowser_dialog(id='Test Window', title='Hello Africa', text='<HTML><HEAD><TITLE>Hello</TITLE></HEAD><BODY>Hello, world!</BODY></HTML>')
{'id': 'Test Window'}
>>> cybrowser_dialog(id='CytoWindow', title='Cytoscape Home Page', url='http://www.cytoscape.org')
{'id': 'CytoWindow'}
See Also:
:meth:`cybrowser_show`, :meth:`cybrowser_hide`
"""
id_str = f' id="{id}"' if id else ''
text_str = f' text="{text}"' if text else ''
title_str = f' title="{title}"' if title else ''
url_str = f' url="{url}"' if url else ''
res = commands.commands_post(f'cybrowser dialog{id_str}{text_str}{title_str}{url_str}', base_url=base_url)
return res | d892fe1a4e48cba8f8561fbe208aec7e2cb4afd7 | 11,489 |
def initialize_stat_dict():
"""Initializes a dictionary which will hold statistics about compositions.
Returns:
A dictionary containing the appropriate fields initialized to 0 or an
empty list.
"""
stat_dict = dict()
for lag in [1, 2, 3]:
stat_dict['autocorrelation' + str(lag)] = []
stat_dict['notes_not_in_key'] = 0
stat_dict['notes_in_motif'] = 0
stat_dict['notes_in_repeated_motif'] = 0
stat_dict['num_starting_tonic'] = 0
stat_dict['num_repeated_notes'] = 0
stat_dict['num_octave_jumps'] = 0
stat_dict['num_fifths'] = 0
stat_dict['num_thirds'] = 0
stat_dict['num_sixths'] = 0
stat_dict['num_seconds'] = 0
stat_dict['num_fourths'] = 0
stat_dict['num_sevenths'] = 0
stat_dict['num_rest_intervals'] = 0
stat_dict['num_special_rest_intervals'] = 0
stat_dict['num_in_key_preferred_intervals'] = 0
stat_dict['num_resolved_leaps'] = 0
stat_dict['num_leap_twice'] = 0
stat_dict['num_high_unique'] = 0
stat_dict['num_low_unique'] = 0
return stat_dict | 42a10b93a960663a42260e1a77d0e8f5a4ff693a | 11,490 |
def nrrd_to_nii(file):
"""
A function that converts the .nrrd atlas to .nii file format
Parameters
----------
file: tuples
Tuple of coronal, sagittal, and horizontal slices you want to view
Returns
-------
F_im_nii: nibabel.nifti2.Nifti2Image
A nifti file format that is used by various medical imaging techniques.
Notes
-------
From: #from: https://nipy.org/nibabel/coordinate_systems.html
"""
_nrrd = nrrd.read(file)
data = _nrrd[0]
header = _nrrd[1] # noqa: F841
F_im_nii = nib.Nifti2Image(data, np.eye(4))
return F_im_nii | 240e94758ef3f52d4e9a4ebb6f0574ade13a1044 | 11,491 |
def reqeustVerifyAuthhandler(request):
    """
    Requests an electronic signature for identity verification.
    - In the identity verification service, the Token generated by the requesting organization is the original text that the user signs. For security it must be generated as a one-time value.
    - The user signs the one-time token generated by the organization, and the organization verifies that signature value, thereby authenticating the user.
    """
    try:
        # Kakaocert client code, available on the Kakaocert partner site
        clientCode = '020040000001'
        # Identity verification request object
        requestObj = RequestVerifyAuth(
            # Customer service phone number, shown in the "Customer Center" field of the KakaoTalk verification message
            CallCenterNum = '1600-8536',
            # Expiration time of the verification request in seconds (maximum 1000); if not verified within this time, the request is marked as expired
            Expires_in = 60,
            # Recipient's date of birth, format: YYYYMMDD
            ReceiverBirthDay = '19900108',
            # Recipient's mobile phone number
            ReceiverHP = '01043245117',
            # Recipient's name
            ReceiverName = '정요한',
            # Alias code created by the requesting organization (available on the partner site)
            # Shown in the "Requesting Organization" field of the KakaoTalk verification message
            # If the alias code is omitted, the organization's name is shown in the "Requesting Organization" field instead
            SubClientID = '',
            # Additional content of the verification request message, shown at the top of the KakaoTalk verification message
            TMSMessage = 'TMSMessage0423',
            # Title of the verification request message, shown in the "Request Type" field of the KakaoTalk verification message
            TMSTitle = 'TMSTitle 0423',
            # Whether to skip bank account real-name verification
            # True: skip the bank account real-name verification step
            # False: perform the bank account real-name verification step
            # If the user receiving the KakaoTalk verification message is not a Kakao Certification member, they can sign only after registering as a member and completing bank account real-name verification
            isAllowSimpleRegistYN = False,
            # Whether to verify the recipient's real name
            # True: compare the real name obtained by KakaoPay through identity verification with the ReceiverName value
            # False: do not compare the real name obtained by KakaoPay through identity verification with the ReceiverName value
            isVerifyNameYN = True,
            # Original token text to be electronically signed
            Token = 'Token Value 2345',
            # Payload (memo) value created by the requesting organization
            PayLoad = 'Payload123',
        )
result = kakaocertService.requestVerifyAuth(clientCode, requestObj)
return render(request, 'response.html', {'receiptId': result.receiptId})
except KakaocertException as KE:
return render(request, 'exception.html', {'code': KE.code, 'message': KE.message}) | 75bce664f11804ed0abb649ce80ef261ebfd0a34 | 11,492 |
import ipaddress
import six
def cidr_validator(value, return_ip_interface=False):
"""Validate IPv4 + optional subnet in CIDR notation"""
try:
if '/' in value:
ipaddr, netmask = value.split('/')
netmask = int(netmask)
else:
ipaddr, netmask = value, 32
if not validators.ipv4_re.match(ipaddr) or not 1 <= netmask <= 32:
raise ValueError
ipi = ipaddress.ip_interface(six.text_type(value))
if ipi.is_reserved:
raise ValueError
except ValueError:
raise ValidationError(_('Enter a valid IPv4 address or IPv4 network.'))
if return_ip_interface:
return ipi | 1c0afd08f3f4f079dc4004400449fe6e27cf0ef7 | 11,494 |
def rh2a(rh, T, e_sat_func=e_sat_gg_water):
"""
    Calculate the absolute humidity from relative humidity and air
    temperature.
Parameters
----------
rh:
Relative humidity in Pa / Pa
T:
Temperature in K
e_sat_func: func, optional
Function to estimate the saturation pressure. E.g. e_sat_gg_water for
water and e_sat_gg_ice for ice.
Returns
-------
float :
        absolute humidity [kg / m^3]
"""
with np.errstate(divide='ignore', invalid='ignore'):
if np.any(rh > 5):
raise TypeError("rh must not be in %")
e = rh*e_sat_func(T)
a = e / (meteo_si.constants.Rvapor*T)
return a | cabbae69d28a68531cf79dfe645e0065ab34534e | 11,495 |
def encoder_decoder_generator(start_img):
"""
"""
layer1 = Conv2D(64, kernel_size=4, strides=2, activation='elu', padding='same')(start_img)
layer2 = Conv2D(64, kernel_size=4, strides=2, activation='elu', padding='same')(layer1)
layer3 = Conv2D(64, kernel_size=4, strides=1, activation='elu', padding='same')(layer2)
layer4 = Conv2DTranspose(64, kernel_size=4, strides=2, activation='elu', padding="same")(layer3)
layer5 = Conv2DTranspose(64, kernel_size=4, strides=2, activation='elu', padding="same")(layer4)
layer6 = Conv2D(64, kernel_size=2, strides=1, activation='elu', padding='same')(layer5)
# Make sure that generator output is in the same range as `inputs`
# ie [-1, 1].
net = Conv2D(3, kernel_size=1, activation = 'tanh', padding='same')(layer6)
return net | 9f0ccb7ebae8f0742fdcd464ce9cd072a9099d3e | 11,496 |
def off():
"""
Turns the buzzer off (sets frequency to zero Hz)
Returns:
None
"""
return _rc.writeAttribute(OPTYPE.BUZZER_FREQ, [0]) | 66e2160fed93ba49bf6c39dff0003a05f2875a77 | 11,497 |
def get_shodan_dicts():
"""Build Shodan dictionaries that hold definitions and naming conventions."""
risky_ports = [
"ftp",
"telnet",
"http",
"smtp",
"pop3",
"imap",
"netbios",
"snmp",
"ldap",
"smb",
"sip",
"rdp",
"vnc",
"kerberos",
]
name_dict = {
"ftp": "File Transfer Protocol",
"telnet": "Telnet",
"http": "Hypertext Transfer Protocol",
"smtp": "Simple Mail Transfer Protocol",
"pop3": "Post Office Protocol 3",
"imap": "Internet Message Access Protocol",
"netbios": "Network Basic Input/Output System",
"snmp": "Simple Network Management Protocol",
"ldap": "Lightweight Directory Access Protocol",
"smb": "Server Message Block",
"sip": "Session Initiation Protocol",
"rdp": "Remote Desktop Protocol",
"kerberos": "Kerberos",
}
risk_dict = {
"ftp": "FTP",
"telnet": "Telnet",
"http": "HTTP",
"smtp": "SMTP",
"pop3": "POP3",
"imap": "IMAP",
"netbios": "NetBIOS",
"snmp": "SNMP",
"ldap": "LDAP",
"smb": "SMB",
"sip": "SIP",
"rdp": "RDP",
"vnc": "VNC",
"kerberos": "Kerberos",
}
# Create dictionaries for CVSSv2 vector definitions using https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator
av_dict = {
"NETWORK": "A vulnerability exploitable with network access means the vulnerable software is bound to the network stack and the attacker does not require local network access or local access. Such a vulnerability is often termed “remotely exploitable”. An example of a network attack is an RPC buffer overflow.",
"ADJACENT_NETWORK": "A vulnerability exploitable with adjacent network access requires the attacker to have access to either the broadcast or collision domain of the vulnerable software. Examples of local networks include local IP subnet, Bluetooth, IEEE 802.11, and local Ethernet segment.",
"LOCAL": "A vulnerability exploitable with only local access requires the attacker to have either physical access to the vulnerable system or a local (shell) account. Examples of locally exploitable vulnerabilities are peripheral attacks such as Firewire/USB DMA attacks, and local privilege escalations (e.g., sudo).",
}
ac_dict = {
"LOW": "Specialized access conditions or extenuating circumstances do not exist. The following are examples: The affected product typically requires access to a wide range of systems and users, possibly anonymous and untrusted (e.g., Internet-facing web or mail server). The affected configuration is default or ubiquitous. The attack can be performed manually and requires little skill or additional information gathering. The 'race condition' is a lazy one (i.e., it is technically a race but easily winnable).",
"MEDIUM": "The access conditions are somewhat specialized; the following are examples: The attacking party is limited to a group of systems or users at some level of authorization, possibly untrusted. Some information must be gathered before a successful attack can be launched. The affected configuration is non-default, and is not commonly configured (e.g., a vulnerability present when a server performs user account authentication via a specific scheme, but not present for another authentication scheme). The attack requires a small amount of social engineering that might occasionally fool cautious users (e.g., phishing attacks that modify a web browser’s status bar to show a false link, having to be on someone’s “buddy” list before sending an IM exploit).",
"HIGH": "Specialized access conditions exist. For example, in most configurations, the attacking party must already have elevated privileges or spoof additional systems in addition to the attacking system (e.g., DNS hijacking). The attack depends on social engineering methods that would be easily detected by knowledgeable people. For example, the victim must perform several suspicious or atypical actions. The vulnerable configuration is seen very rarely in practice. If a race condition exists, the window is very narrow.",
}
ci_dict = {
"NONE": "There is no impact to the confidentiality of the system",
"PARTIAL": "There is considerable informational disclosure. Access to some system files is possible, but the attacker does not have control over what is obtained, or the scope of the loss is constrained. An example is a vulnerability that divulges only certain tables in a database.",
"COMPLETE": "There is total information disclosure, resulting in all system files being revealed. The attacker is able to read all of the system's data (memory, files, etc.).",
}
return risky_ports, name_dict, risk_dict, av_dict, ac_dict, ci_dict | 2aace61b8339db848e95758fcb9f30856915d6fc | 11,499 |
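# Usage sketch (added for illustration): the reference dictionaries returned above
# can annotate a detected service. The caller name `build_port_reference` is an
# assumption for the enclosing function, which is not shown in full here.
risky_ports, name_dict, risk_dict, av_dict, ac_dict, ci_dict = build_port_reference()
service = "telnet"
print("%s (%s) - attack vector: %s..." % (risk_dict[service], name_dict[service], av_dict["NETWORK"][:40]))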
def safe_download(f):
"""
Makes a download safe, by trapping any app errors and redirecting
to a default landing page.
Assumes that the first 2 arguments to the function after request are
domain and app_id, or there are keyword arguments with those names
"""
@wraps(f)
def _safe_download(request, *args, **kwargs):
domain = args[0] if len(args) > 0 else kwargs["domain"]
app_id = args[1] if len(args) > 1 else kwargs["app_id"]
latest = True if request.GET.get('latest') == 'true' else False
target = request.GET.get('target') or None
try:
request.app = get_app(domain, app_id, latest=latest, target=target)
return f(request, *args, **kwargs)
        except (AppEditingError, CaseError) as e:
logging.exception(e)
messages.error(request, "Problem downloading file: %s" % e)
return HttpResponseRedirect(reverse("corehq.apps.app_manager.views.view_app", args=[domain,app_id]))
return _safe_download | 1d48c48ac067fcc180af37b90949123c5dc864d9 | 11,500 |
def Moebius(quaternion_or_infinity, a,b=None,c=None,d=None):
"""
The Moebius transformation of a quaternion (z)
with parameters a,b,c and d
>>> import qmath
>>> a = qmath.quaternion([1,1,1,0])
>>> b = qmath.quaternion([-2,1,0,1])
>>> c = qmath.quaternion([1,0,0,0])
>>> d = qmath.quaternion([0,-1,-3,-4])
>>> z = qmath.quaternion([1,1,3,4])
>>> qmath.Moebius(z,a,b,c,d)
(-5.0+7.0i+7.0k)
>>> d = - z
>>> z = qmath.Moebius(z,a,b,c,d)
>>> z
'Infinity'
>>> qmath.Moebius(z,a,b,c,d)
(1.0+1.0i+1.0j)
"""
if type(a) == tuple:
return Moebius(quaternion_or_infinity,a[0],a[1],a[2],a[3])
else:
A = quaternion(a)
B = quaternion(b)
C = quaternion(c)
D = quaternion(d)
if A * D - B * C == 0:
raise RuntimeError(' this is not a Moebius transformation')
elif quaternion_or_infinity == 'Infinity':
return A / C
else:
Z = quaternion(quaternion_or_infinity)
try:
return (A * Z + B) * quaternion.inverse(C * Z + D)
except:
return 'Infinity' | 9bfd05268caa6aad1247886717932ca332212e4b | 11,501 |
def _passthrough_zotero_data(zotero_data):
"""
Address known issues with Zotero metadata.
Assumes zotero data should contain a single bibliographic record.
"""
if not isinstance(zotero_data, list):
raise ValueError('_passthrough_zotero_data: zotero_data should be a list')
if len(zotero_data) > 1:
# Sometimes translation-server creates multiple data items for a single record.
# If so, keep only the parent item, and remove child items (such as notes).
# https://github.com/zotero/translation-server/issues/67
zotero_data = zotero_data[:1]
return zotero_data | cec2271a7a966b77e2d380686ecccc0307f78116 | 11,502 |
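# Usage sketch for _passthrough_zotero_data (added for illustration): a parent
# record plus a child note from translation-server collapses to just the parent.
records = [{"itemType": "journalArticle", "title": "Example"}, {"itemType": "note"}]
print(_passthrough_zotero_data(records))  # -> [{'itemType': 'journalArticle', 'title': 'Example'}]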
import json
from flask import jsonify, request
def telebot():
    """Endpoint responsible for parsing and responding to the bot webhook."""
payload = json.loads(request.data)
message = payload.get('message', payload.get('edited_message',''))
msg_from = message.get('from')
user_id = msg_from.get('id')
user_first_name = msg_from.get('first_name','')
user_last_name = msg_from.get('last_name','')
user_is_bot = msg_from.get('is_bot')
chat = message.get('chat')
chat_id = chat.get('id')
command = message.get('text')
if user_is_bot or message == '':
return jsonify({'method': 'sendMessage','chat_id' : chat_id,'text': 'Sorry I can\'t answer you!'})
bot_response = {
'method': 'sendMessage',
'chat_id' : chat_id,
'text': f'[{user_first_name} {user_last_name}](tg://user?id={user_id}) {command}',
'parse_mode':'Markdown',
}
return jsonify(bot_response) | 3a42fee4a89e1be3fa1ec17da21738bfcefba4ba | 11,503 |
def root(tmpdir):
"""Return a pytest temporary directory"""
return tmpdir | 9fa01d67461f8ce1e3d3ad900cf8a893c5a075aa | 11,505 |
from app.crud.core import ready
import logging
import typing as tp
def _check_storage(log_fn: tp.Callable) -> bool:
"""See if the storage system is alive."""
try:
log_fn('Attempting to contact storage system', depth=1)
result = ready()
return result
except Exception as ex:
log_fn(ex, level=logging.WARN, depth=1)
return False | b20ca64094126a40fd8eb0ce76e3329c8b4da6cb | 11,506 |
def ignore_ip_addresses_rule_generator(ignore_ip_addresses):
"""
generate tshark rule to ignore ip addresses
Args:
ignore_ip_addresses: list of ip addresses
Returns:
        list of tshark rule strings, one per ignored address
"""
rules = []
for ip_address in ignore_ip_addresses:
rules.append("-Y ip.dst != {0}".format(ip_address))
return rules | 3ac43f28a4c8610d4350d0698d93675572d6ba44 | 11,507 |
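# Usage sketch for ignore_ip_addresses_rule_generator (added for illustration):
# one ignore rule is produced per address.
print(ignore_ip_addresses_rule_generator(["10.0.0.5", "192.168.1.10"]))
# -> ['-Y ip.dst != 10.0.0.5', '-Y ip.dst != 192.168.1.10']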
def readmission(aFileName):
"""
Load a mission from a file into a list. The mission definition is in the Waypoint file
format (http://qgroundcontrol.org/mavlink/waypoint_protocol#waypoint_file_format).
This function is used by upload_mission().
"""
print "\nReading mission from file: %s" % aFileName
cmds = vehicle.commands
missionlist=[]
with open(aFileName) as f:
for i, line in enumerate(f):
if i==0:
if not line.startswith('QGC WPL 110'):
raise Exception('File is not supported WP version')
else:
linearray=line.split('\t')
ln_index=int(linearray[0])
ln_currentwp=int(linearray[1])
ln_frame=int(linearray[2])
ln_command=int(linearray[3])
ln_param1=float(linearray[4])
ln_param2=float(linearray[5])
ln_param3=float(linearray[6])
ln_param4=float(linearray[7])
ln_param5=float(linearray[8])
ln_param6=float(linearray[9])
ln_param7=float(linearray[10])
ln_autocontinue=int(linearray[11].strip())
cmd = Command( 0, 0, 0, ln_frame, ln_command, ln_currentwp, ln_autocontinue, ln_param1, ln_param2, ln_param3, ln_param4, ln_param5, ln_param6, ln_param7)
missionlist.append(cmd)
return missionlist | 08e92ef784340dcd9bbd3ca8bb85a9c8a9211841 | 11,508 |
def remove_stop_words(words):
"""Remove all stop words.
Args:
words (list): The list of words
Returns:
list: An updated word list with stopwords removed.
"""
# http://stackoverflow.com/questions/5486337/
# how-to-remove-stop-words-using-nltk-or-python
return [w for w in words if w.lower() not in stopwords.words('english')] | 29910d1c04cb27ac281428a5401501e4c0e633ae | 11,509 |
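# Usage sketch for remove_stop_words (added for illustration; assumes nltk is
# installed and the stopwords corpus has been fetched with nltk.download('stopwords')):
from nltk.corpus import stopwords
print(remove_stop_words(["The", "quick", "brown", "fox"]))  # -> ['quick', 'brown', 'fox']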
def synthetic_costs_1():
""" Uncertainty in 5 points at [0,0] on X1 can cause it to flip
to [1,0] if needed to misclassify
Uncertainty in 1 point at [1,1] on X2 can cause it to flip
to [1,0] if needed to misclassify
All other points certain
"""
costs = np.array([[1,4],[1,4],[1,4],[1,4],[1,4],[4,4],[4,4],
[4,4],[4,4],[4,4],
[4,1],
[4,4],[4,4]])
return costs | 97753d9e816feba56b609685831df2d183ab408f | 11,510 |
def example_one(request, context=None):
""" Return web page for example one. """
if context is None:
context = {}
session = request.session.get("ApiSession", None)
if session is None:
return no_session_set(request)
session = Session.deserialize(session)
origin_codes = get_codes_with_filter(session, REPORTING_AIRPORT_CODE, 0)
context.update(
{
"title": "Example 1",
"active": "example_one",
"origin_codes": origin_codes,
"dest_codes": EXAMPLE_ONE_DESTS,
}
)
return render(request, "example_one.html", context) | 25bc3fea514e4011c3be513868fd58d0c2b80d2f | 11,511 |
from typing import Any, List
def decode(cls: Any, value: bytes) -> Any:
"""Decode value in katcp message to a type.
If a union type is provided, the value must decode successfully (i.e.,
without raising :exc:`ValueError`) for exactly one of the types in the
union, otherwise a :exc:`ValueError` is raised.
Parameters
----------
cls
The target type, or a :class:`typing.Union` of types.
value
Raw (but unescaped) value in katcp message
Raises
------
ValueError
if `value` does not have a valid value for `cls`
TypeError
if `cls` is not a registered type or union of registered
types.
See also
--------
:func:`register_type`
"""
union_args = _union_args(cls)
if union_args is not None:
values = [] # type: List[Any]
for type_ in union_args:
try:
values.append(decode(type_, value))
except ValueError:
pass
if len(values) == 1:
return values[0]
elif not values:
raise ValueError('None of the types in {} could decode {!r}'.format(
cls, value))
else:
raise ValueError('{!r} is ambiguous for {}'.format(value, cls))
else:
return get_type(cls).decode(cls, value) | 3036b69089e68d2a47c3ca110024bde6a026ba5d | 11,512 |
from typing import TextIO
def load_f0(fhandle: TextIO) -> annotations.F0Data:
"""Load an ikala f0 annotation
Args:
fhandle (str or file-like): File-like object or path to f0 annotation file
Raises:
IOError: If f0_path does not exist
Returns:
F0Data: the f0 annotation data
"""
lines = fhandle.readlines()
f0_midi = np.array([float(line) for line in lines])
f0_hz = librosa.midi_to_hz(f0_midi) * (f0_midi > 0)
confidence = (f0_hz > 0).astype(float)
times = (np.arange(len(f0_midi)) * TIME_STEP) + (TIME_STEP / 2.0)
f0_data = annotations.F0Data(times, f0_hz, confidence)
return f0_data | 7c0f47e63db1a6fee4718420d74799fa73740b52 | 11,513 |
def remove_fallen(lst):
"""removes fallen orcs from a list"""
return [x for x in lst if x.standing] | 9e621321909dc7aa13da3d2a7902bb4604ae62f6 | 11,514 |
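# Usage sketch for remove_fallen (added for illustration): any object exposing a
# boolean `standing` attribute works; the Orc class below is a stand-in, not original code.
class Orc:
    def __init__(self, standing):
        self.standing = standing

print(len(remove_fallen([Orc(True), Orc(False), Orc(True)])))  # -> 2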
def gc_resnet152(num_classes):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(GCBottleneck, [3, 8, 36, 3], num_classes=num_classes)
model.avgpool = nn.AdaptiveAvgPool2d(1)
return model | a1986afd48284471045b08322e008796ee7743bb | 11,515 |
def consume_entropy(generated_password: str, quotient: int, max_length: int) -> str:
"""
Takes the entropy (quotient) and the length of password (max_length) required
and uses the remainder of their division as the index to pick a character from
the characters list.
This process occurs recursively until the password is of the required length.
"""
if len(generated_password) >= max_length:
return generated_password
quotient, remainder = divmod(quotient, len(characters))
generated_password += characters[remainder]
return consume_entropy(generated_password, quotient, max_length) | 8ba58b34704e9db389241a255e9ec1963e508c99 | 11,516 |
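# Usage sketch for consume_entropy (added for illustration): `characters` is a
# module-level alphabet the original function closes over; the one defined here
# is an assumption.
characters = "abcdefghijklmnopqrstuvwxyz0123456789"
print(consume_entropy("", quotient=2 ** 64, max_length=10))  # 10-character password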
def randomNormal(n, height, baseshape=[]):
"""
Generate random positions, normally distributed along z. Base shape can be:
[] (1D sim)
[Ly] (2D sim)
[Lx, Ly] (3D sim)
Where Lx, Ly are lengths along x, y.
"""
nDim = len(baseshape) + 1
pos = np.zeros([n, nDim])
z = np.random.randn(n)
z *= height
pos[:,-1] = z
for i in range(nDim - 1):
pos[:, i] = np.random.rand(n) * baseshape[i]
return pos | 96d703ecc059fe180b71f547dfee7f259d803a87 | 11,517 |
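# Usage sketch for randomNormal (added for illustration; the function itself
# assumes numpy is imported as np): 1000 particles in a 10 x 20 base, normally
# distributed along z with a spread of 2.5.
pos = randomNormal(1000, height=2.5, baseshape=[10.0, 20.0])
print(pos.shape)  # -> (1000, 3)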
def _parseCellContentsSection(fileAsList, lineIdx):
""" returns fractCoords from Cell Contents section of castep
Args:
fileAsList(str list): Each entry is 1 line of the castep input file
lineIdx(int): The index containing the line "cell contents"
Returns
fractCoords: nx4 iter with each containing [x,y,z,symbol]. Used to init UnitCell objects
"""
finished = False
while not finished:
currLine = fileAsList[lineIdx].strip()
if "Fractional coord" in fileAsList[lineIdx]:
lineIdx = lineIdx + 3
fractCoords = list()
while "xx" not in currLine:
currLine = fileAsList[lineIdx].strip()
splitLine = currLine.split()
if len(splitLine) == 1:
break
currCoords = [float(x) for x in splitLine[3:6]] + [splitLine[1]]
fractCoords.append(currCoords)
lineIdx = lineIdx + 1
break
else:
lineIdx = lineIdx+1
return fractCoords, lineIdx | 3baa1a200442ef8681a0741bfa2a60d9ca1e20b2 | 11,520 |
def get_avg_no_of_feat_values(contents):
"""
    Helper to calculate the number of distinct values of each categorical
    feature, averaged over all features
"""
total = 0
for i in range(0, len(contents[0])):
total += len(set([x[i] for x in contents]))
return float(total) / float(len(contents[0])) | 4e913298d7f133eb08afe23e4999f5b20f455dc1 | 11,521 |
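# Usage sketch for get_avg_no_of_feat_values (added for illustration): three rows
# with two categorical features; feature 0 has 2 distinct values and feature 1
# has 3, so the average is 2.5.
rows = [["red", "s"], ["red", "m"], ["blue", "l"]]
print(get_avg_no_of_feat_values(rows))  # -> 2.5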
def plot_trend_line(axes_, xd, yd, c='r', alpha=1, cus_loc = None, text_color='black', return_params=False,
extra_text='', t_line_1_1=True, fit_function=None, fontsize_=12, add_text=True):
"""Make a line of best fit"""
#create clean series
x_, y_ = coincidence(xd,yd)
if fit_function is not None:
params = curve_fit(fit_function, x_, y_)
print('fitted parameters')
print(params[0])
fit_line_x = np.arange(int(np.nanmin(x_)),int(np.nanmax(x_))+1,.1)
plotting_par_list = [fit_line_x]
for fit_par in params[0]:
plotting_par_list.append(fit_par)
funt_par = tuple(plotting_par_list)
fit_line_y = fit_function(*funt_par)
axes_.plot(fit_line_x, fit_line_y, c, alpha=alpha)
# calculate R2
plotting_par_list = [x_]
params_str_ = ''
for i_, fit_par in enumerate(params[0]):
if extra_text == '':
params_str_ = params_str_ + 'fit parameters ' + str(i_+1) + ': ' + '$%0.2f$' % (fit_par) + '\n'
else:
params_str_ = params_str_ + extra_text + '$%0.2f$' % (fit_par) + '\n'
plotting_par_list.append(fit_par)
funt_par = tuple(plotting_par_list)
fit_line_y = fit_function(*funt_par)
residuals = y_ - fit_line_y
ss_res = np.sum(residuals**2)
ss_tot = np.sum((y_ - np.mean(y_))**2)
Rsqr = float(1 - (ss_res / ss_tot))
# Plot R^2 value
x_1 = np.nanmin(x_)
y_2 = np.nanmax(y_)
error_text = '$R^2 = %0.2f$' % Rsqr
if cus_loc is None:
axes_.text(x_1, y_2 , params_str_ + error_text, fontsize=fontsize_,
horizontalalignment='left',verticalalignment='top',color=text_color,
bbox={'facecolor': 'white', 'edgecolor': 'none'})
else:
axes_.text(cus_loc[0], cus_loc[1] , params_str_ + error_text, fontsize=fontsize_,
horizontalalignment='left',verticalalignment='top',color=text_color,
bbox={'facecolor': 'white', 'edgecolor': 'none'})
else:
# Calculate trend line
params = np.polyfit(x_, y_, 1)
intercept = params[-1]
slope = params[-2]
minxd = np.nanmin(x_)
maxxd = np.nanmax(x_)
xl = np.array([minxd, maxxd])
yl = slope * xl + intercept
print('fitted parameters')
print(slope, intercept)
# Plot trend line
axes_.plot(xl, yl, c, alpha=alpha)
# Calculate R Squared
poly_1d = np.poly1d(params)
ybar = np.sum(y_) / len(y_)
ssreg = np.sum((poly_1d(x_) - ybar) ** 2)
sstot = np.sum((y_ - ybar) ** 2)
Rsqr = float(ssreg / sstot)
# Plot R^2 value
x_1 = np.nanmin(x_)
y_2 = np.nanmax(y_)
if intercept >= 0:
if extra_text=='':
equat_text = '$Y = %0.2f*x + %0.2f$' % (slope,intercept)
else:
equat_text = extra_text + '\n' + '$Y = %0.2f*x + %0.2f$' % (slope,intercept)
else:
if extra_text=='':
equat_text = '$Y = %0.2f*x %0.2f$' % (slope,intercept)
else:
equat_text = extra_text + '\n' + '$Y = %0.2f*x %0.2f$' % (slope,intercept)
error_text = '$R^2 = %0.2f$' % Rsqr
if add_text:
if cus_loc is None:
axes_.text(x_1, y_2 , equat_text + '\n' + error_text, fontsize=fontsize_,
horizontalalignment='left',verticalalignment='top',color=text_color)
else:
axes_.text(cus_loc[0], cus_loc[1] , equat_text + '\n' + error_text, fontsize=fontsize_,
horizontalalignment='left',verticalalignment='top',color=text_color)
# plot 1:1 line if true
if t_line_1_1:
xy_min = np.min([np.nanmin(x_),np.nanmin(y_)])
xy_max = np.max([np.nanmax(x_),np.nanmax(y_)])
axes_.plot([xy_min, xy_max], [xy_min, xy_max], 'k--')
if return_params:
return Rsqr, params
else:
return Rsqr | a4d6e41bf03524f257531bbb0f2bb43d1b3b6b8b | 11,522 |
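# Usage sketch for plot_trend_line (added for illustration; assumes matplotlib,
# numpy as np, and the module-level `coincidence` helper used inside the function
# are all available):
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
x = np.linspace(0, 10, 50)
y = 2.0 * x + 1.0 + np.random.normal(0, 0.5, 50)
ax.scatter(x, y)
r_squared = plot_trend_line(ax, x, y, c='r', t_line_1_1=False)
print(r_squared)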
from pathlib import Path
import yaml
def get_oil_type_atb(
oil_attrs, origin, destination, transport_data_dir, random_generator
):
"""Randomly choose type of cargo oil spilled from an ATB (articulated tug and barge) based on
AIS track origin & destination, and oil cargo attribution analysis.
Unlike traditional tank barges, the vessels with 'atb' designation are known oil-cargo vessels.
We used three different data sources to verify: AIS, Dept of Ecology's fuel transfer records
and Charlie Costanzo's ATB list. Details of traffic can be seen in this google spreadsheet:
https://docs.google.com/spreadsheets/d/1dlT0JydkFG43LorqgtHle5IN6caRYjf_3qLrUYqANDY/edit
Because of this pre-identification and selection method, we can assume that all ATBs are
oil-cargo atbs and that the absence of origin-destination information is due to issues in
linking ship tracks and not ambiguity about whether traffic is oil-cargo traffic.
:param dict oil_attrs: Oil attribution information from the output of make_oil_attrs.py.
:param str or None origin: Origin of AIS track from which spill occurs.
:param str or None destination: Destination of AIS track from which spill occurs.
:param transport_data_dir: Directory path to marine_transport_data files repository
cloned from https://github.com/MIDOSS/marine_transport_data.
:type transport_data_dir: :py:class:`pathlib.Path`
:param random_generator: PCG-64 random number generator
:type random_generator: :py:class:`numpy.random.Generator`
:return: Type of oil spilled.
:rtype: str
"""
vessel_type = "atb"
# Assign US and CAD origin/destinations from oil_attrs file
CAD_origin_destination = oil_attrs["categories"]["CAD_origin_destination"]
US_origin_destination = oil_attrs["categories"]["US_origin_destination"]
# Get cargo oil type attribution information from oil-type yaml files
yaml_file = transport_data_dir / Path(oil_attrs["files"]["CAD_origin"]).name
with yaml_file.open("rt") as f:
CAD_yaml = yaml.safe_load(f)
yaml_file = transport_data_dir / Path(oil_attrs["files"]["WA_destination"]).name
with yaml_file.open("rt") as f:
WA_in_yaml = yaml.safe_load(f)
WA_in_noinfo = _calc_no_info_facilities(WA_in_yaml)
yaml_file = transport_data_dir / Path(oil_attrs["files"]["WA_origin"]).name
with yaml_file.open("rt") as f:
WA_out_yaml = yaml.safe_load(f)
WA_out_noinfo = _calc_no_info_facilities(WA_out_yaml)
# US_origin is for US as origin
yaml_file = transport_data_dir / Path(oil_attrs["files"]["US_origin"]).name
with yaml_file.open("rt") as f:
US_yaml = yaml.safe_load(f)
# US_combined represents the combined import and export of oil
yaml_file = transport_data_dir / Path(oil_attrs["files"]["US_combined"]).name
with yaml_file.open("rt") as f:
USall_yaml = yaml.safe_load(f)
yaml_file = transport_data_dir / Path(oil_attrs["files"]["Pacific_origin"]).name
with yaml_file.open("rt") as f:
Pacific_yaml = yaml.safe_load(f)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# NOTE: these pairs need to be used together for "get_oil_type_cargo"
# (but don't yet have error-checks in place):
# - "WA_in_yaml" and "destination"
# - "WA_out_yaml" and "origin"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if origin in CAD_origin_destination:
if origin == "Westridge Marine Terminal":
if destination == "U.S. Oil & Refining":
oil_type = get_oil_type_cargo(
CAD_yaml, origin, vessel_type, random_generator
)
elif destination in US_origin_destination:
oil_type = get_oil_type_cargo(
CAD_yaml, origin, vessel_type, random_generator
)
elif destination in CAD_origin_destination:
# assume export within CAD is from Jet fuel storage tanks
# as there is a pipeline to Parkland for crude oil
oil_type = "jet"
else:
oil_type = get_oil_type_cargo(
CAD_yaml, origin, vessel_type, random_generator
)
else:
if destination in US_origin_destination:
# we have better information on WA fuel transfers,
# so I prioritize this information source
oil_type = get_oil_type_cargo(
WA_in_yaml, destination, vessel_type, random_generator
)
elif destination == "ESSO Nanaimo Departure Bay":
oil_type = get_oil_type_cargo(
CAD_yaml, destination, vessel_type, random_generator
)
elif destination == "Suncor Nanaimo":
oil_type = get_oil_type_cargo(
CAD_yaml, destination, vessel_type, random_generator
)
else:
oil_type = get_oil_type_cargo(
CAD_yaml, origin, vessel_type, random_generator
)
elif origin in US_origin_destination and origin not in WA_out_noinfo[vessel_type]:
if destination == "Westridge Marine Terminal":
# Westridge stores jet fuel from US for re-distribution
oil_type = "jet"
else:
oil_type = get_oil_type_cargo(
WA_out_yaml, origin, vessel_type, random_generator
)
elif (
destination in US_origin_destination
and destination not in WA_in_noinfo[vessel_type]
):
oil_type = get_oil_type_cargo(
WA_in_yaml, destination, vessel_type, random_generator
)
elif destination in CAD_origin_destination:
if destination == "Westridge Marine Terminal":
# Westridge doesn't receive crude for storage
oil_type = "jet"
else:
oil_type = get_oil_type_cargo(
CAD_yaml, destination, vessel_type, random_generator
)
elif origin == "Pacific":
oil_type = get_oil_type_cargo(
Pacific_yaml, origin, vessel_type, random_generator
)
elif origin == "US":
oil_type = get_oil_type_cargo(US_yaml, origin, vessel_type, random_generator)
else:
# For all other traffic, use a generic fuel attribution from the combined
# US import and export
oil_type = get_oil_type_cargo(USall_yaml, None, vessel_type, random_generator)
return oil_type | e7e6e51ece2bb5b4fffc70d2507c2e5ff062bbd8 | 11,523 |
def get_jwt():
"""
Get Authorization token and validate its signature
against the application's secret key, .
"""
expected_errors = {
KeyError: WRONG_PAYLOAD_STRUCTURE,
AssertionError: JWK_HOST_MISSING,
InvalidSignatureError: WRONG_KEY,
DecodeError: WRONG_JWT_STRUCTURE,
InvalidAudienceError: WRONG_AUDIENCE,
TypeError: KID_NOT_FOUND
}
token = get_auth_token()
try:
jwks_payload = jwt.decode(token, options={'verify_signature': False})
assert 'jwks_host' in jwks_payload
jwks_host = jwks_payload.get('jwks_host')
key = get_public_key(jwks_host, token)
aud = request.url_root
payload = jwt.decode(
token, key=key, algorithms=['RS256'], audience=[aud.rstrip('/')]
)
set_ctr_entities_limit(payload)
return payload
except tuple(expected_errors) as error:
message = expected_errors[error.__class__]
raise AuthorizationError(message) | 9c52369b38db9815769ea8277c3e3721ba20c1c9 | 11,524 |
def start_volume(name, force=False):
"""
Start a gluster volume
name
Volume name
force
Force the volume start even if the volume is started
.. versionadded:: 2015.8.4
CLI Example:
.. code-block:: bash
salt '*' glusterfs.start mycluster
"""
cmd = "volume start {0}".format(name)
if force:
cmd = "{0} force".format(cmd)
volinfo = info(name)
if name not in volinfo:
log.error("Cannot start non-existing volume %s", name)
return False
if not force and volinfo[name]["status"] == "1":
log.info("Volume %s already started", name)
return True
return _gluster(cmd) | 6dc936b4e09beb9713c32e4c93e8649999f82c3c | 11,525 |
def hist_equal(img, z_max=255):
"""
    Histogram equalization: brighten dark regions and darken bright regions.
    :param img: input image (grayscale or multi-channel uint8 array)
    :param z_max: intensity range of the original image (brightest minus darkest value)
    :return: the equalized image as a uint8 array
"""
if len(img.shape) == 2:
height, width = img.shape
n_chan = 1
elif len(img.shape) == 3:
height, width, n_chan = img.shape
print(img[:, :, 0].shape)
# H, W = img.shape
# S is the total of pixels
n_pixle = height * width
out = img.copy()
sum_h = 0.
if n_chan == 1:
for i in range(1, 255):
ind = np.where(img == i)
sum_h += len(img[ind])
z_prime = z_max / n_pixle * sum_h
out[ind] = z_prime
else:
        for c in range(n_chan):
            tmp_img = img[:, :, c]
            tmp_out = tmp_img.copy()
            sum_h = 0.  # reset the cumulative count for each channel
for i in range(1, 255):
ind = np.where(tmp_img == i)
sum_h += len(tmp_img[ind])
z_prime = z_max / n_pixle * sum_h
tmp_out[ind] = z_prime
out[:, :, c] = tmp_out
out = out.astype(np.uint8)
return out | e6aaf76ce8088b9519cd896d8236a84f01761976 | 11,526 |
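# Usage sketch for hist_equal (added for illustration; the function itself assumes
# numpy is imported as np, and real images would come from e.g. an image loader):
img = (np.random.rand(64, 64) * 255).astype(np.uint8)
flat = hist_equal(img, z_max=255)
print(flat.dtype, flat.shape)  # -> uint8 (64, 64)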
from itertools import product
def combine(*indices_lists):
"""
Return all the combinations from lists of indices
:param indices_lists: each argument is a list of indices (it must be a list)
:return: The combined list of indices
"""
if len([*indices_lists]) > 1:
return [i for i in product(*indices_lists)]
else:
return set(*indices_lists) | 839762c9645e0c8d6ea31a21113a5efd6b97f1de | 11,527 |
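# Usage sketch for combine (added for illustration): the Cartesian product of two
# index lists, or a de-duplicated set when a single list is given.
print(combine([0, 1], [10, 11]))  # -> [(0, 10), (0, 11), (1, 10), (1, 11)]
print(combine([3, 3, 4]))         # -> {3, 4}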
import re
def get_endpoint(query):
"""
Regex to parse domain and API endpoint from a SoQL query via FROM
statement
:param query: str, SoQL-formatted query
:return
url, endpoint, query: str objects, domain, endpoint, and
original query sans FROM statement
"""
url = re.search(r'\w+\.\w+\.(\w{2,3})', query, flags=re.I)
endpoint = re.search(r'(\w{4}-\w{4})\.json', query, flags=re.I)
query = re.sub(r'from( +|\t+|\n+).+', '', query, flags=re.I)
return url.group(), endpoint.group(1), query | 4496c85f2e6f908bd5dcef7195b821998ef79c42 | 11,528 |
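# Usage sketch for get_endpoint (added for illustration; the domain and dataset id
# in the query are made up):
q = "SELECT * FROM data.cityofnewyork.us/erm2-nwe9.json WHERE city = 'NYC'"
url, endpoint, query = get_endpoint(q)
print(url, endpoint)  # -> data.cityofnewyork.us erm2-nwe9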
def load_data(filename: str) ->pd.DataFrame:
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
df = pd.read_csv(filename)
df.fillna(0, inplace=True)
df[["id", "price", "bedrooms", "bathrooms", "sqft_living", "sqft_lot", "floors", "waterfront", "view",
"condition", "grade", "sqft_above", "sqft_basement", "yr_built", "yr_renovated",
"zipcode", "lat", "long", "sqft_living15", "sqft_lot15"]] = df[
["id", "price", "bedrooms", "bathrooms", "sqft_living", "sqft_lot", "floors", "waterfront", "view",
"condition", "grade", "sqft_above", "sqft_basement", "yr_built", "yr_renovated",
"zipcode", "lat", "long", "sqft_living15", "sqft_lot15"]].apply(pd.to_numeric)
df['date'] = df['date'].astype("str").apply(lambda s: s[:8])
df['date'] = df['date'].astype('float64')
df = df[
(df["id"] >= 1) &
(df["date"] >= 20000000) & (df["date"] <= 20220000) &
(df["price"] >= 50000) & (df["price"] <= 10000000) &
(df["bedrooms"] >= 0) & (df["bedrooms"] <= 15) &
(df["bathrooms"] >= 0) & (df["bathrooms"] <= 12) &
(df["sqft_living"] >= 200) & (df["sqft_living"] <= 100000) &
(df["sqft_lot"] >= 450) & (df["sqft_lot"] <= 1800000) &
(df["floors"] >= 1) & (df["floors"] <= 4) &
(df["waterfront"] == 0) | (df["waterfront"] == 1) &
(df["view"] >= 0) & (df["view"] <= 4) &
(df["condition"] >= 1) & (df["condition"] <= 5) &
(df["grade"] >= 1) & (df["grade"] <= 13) &
(df["sqft_above"] >= 250) & (df["sqft_above"] <= 10000) &
(df["sqft_basement"] >= 0) & (df["sqft_basement"] <= 5000) &
(df["yr_built"] >= 1800) & (df["yr_built"] <= 2022) &
(df["yr_renovated"] >= 0) & (df["yr_renovated"] <= 2022) &
(df["zipcode"] >= 98000) & (df["zipcode"] <= 99000) &
(df["lat"] >= 47) & (df["lat"] <= 48) &
(df["long"] >= -123) & (df["long"] <= -121) &
(df["sqft_living15"] >= 300) & (df["sqft_living15"] <= 10000) &
(df["sqft_lot15"] >= 300) & (df["sqft_lot15"] <= 1000000)
]
# inserting the "yr_renovated" col the last year in which the building had had any renovation.
df["yr_renovated"] = df[["yr_built", "yr_renovated"]].max(axis=1)
prices_by_zipcode = pd.DataFrame({'zipcode': df['zipcode'], 'price': df['price']})
prices_by_zipcode = prices_by_zipcode.groupby('zipcode').mean()
prices_by_zipcode.rename(columns={'price': 'mean_price'}, inplace=True)
df = pd.merge(df, prices_by_zipcode, on='zipcode')
    df = df.drop(['id', 'zipcode', 'lat', 'long'], axis=1)
return df | 26c785cb72b883cab03b9da6c7718b71e7ccea76 | 11,529 |
def version_match(required, candidate):
"""Test that an available version is a suitable match for a required
version.
To be suitable a version must be of the same major version as required
and be at least a match in minor/patch level.
eg. 3.3 is a match for a required 3.1 but 4.1 is not.
:param tuple required: the version that must be met.
:param tuple candidate: the version to test against required.
:returns: True if candidate is suitable False otherwise.
:rtype: bool
"""
return _discover.version_match(required, candidate) | bc537fdae084a3c3ccb7b8336703ef4c2476de6e | 11,530 |
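# Behavioural sketch for version_match (comments only, following the docstring's
# own examples; the call simply delegates to the module-level _discover helper):
#   version_match((3, 1), (3, 3))  -> True   (same major, candidate minor is high enough)
#   version_match((3, 1), (4, 1))  -> False  (major versions differ)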
from typing import Optional
import datetime
def get_last_upgraded_at(module: base.Module) -> Optional[datetime.datetime]:
"""
Get the timestamp of the last time this module was upgraded.
"""
return settings.get_last_upgraded_at(module.name) | bf884bf4c249448929b987504d400d6ba1b12927 | 11,531 |
import ast
import collections
import re
import logging
def parse_header_file(header_file):
"""Parse a single header file to get all defined constants out of it."""
resolved_values = collections.OrderedDict()
raw_matches = {}
with open(header_file, "r") as fd:
all_file_lines = collections.OrderedDict(
[
(lineno, line.strip())
for lineno, line in enumerate(fd, start=1)
if not line.isspace()
]
)
line_iterator = iter(all_file_lines.items())
for lineno, line in line_iterator:
line, _comment = clean_line(line)
# First check to see if this is a #define statement
match = re.match(r"^#define\s+UC_(?P<id>\w+)\s+(?P<value>.*)$", line)
if match:
name = "UC_" + match.group("id")
raw_value = match.group("value")
try:
resolved_values[name] = ast.literal_eval(raw_value)
except (NameError, SyntaxError, ValueError):
raw_matches[name] = raw_value
continue
# Not a #define; see if it's an enum.
if "enum uc_" not in line.lower():
continue
# This is the beginning of an enum. Subsequent lines until the closing `}` are
# part of it. We need to keep track because enums without an explicitly defined
# value are incremented by one from the previous enum value.
next_enum_value = 0
enum_start_line = lineno
while True:
lineno, line = next(line_iterator, (None, None))
if line is None:
# Hit EOF before we hit the end of the enum. That's odd.
logging.warning(
"Hit EOF before end of enum beginning on line %d.", enum_start_line
)
break
elif "}" in line:
# Hit the end of the enum.
break
line, _comment = clean_line(line)
# Sometimes we have multiple enum definitions on one line. We need to handle
# these one at a time. Splitting the line by commas should be enough to
# separate out multiple expressions.
for expression in line.strip(",").split(","):
expression = expression.strip()
if not expression:
continue
# See if this enum value is being assigned rather than implicit.
match = re.match(r"^UC_(?P<id>\w+)\s*=\s*(?P<expr>.+)$", expression)
if match:
# Enum value is assigned. Whatever's on the right-hand side, any
# names it references must already be defined.
name = "UC_" + match.group("id")
raw_value = match.group("expr")
try:
processed_value = eval(raw_value, resolved_values)
except NameError as nerr:
logging.error(
"Failed to resolve %r on line %d: %s", name, lineno, nerr
)
continue
resolved_values[name] = processed_value
next_enum_value = processed_value + 1
else:
# Not an explicit assignment. Expect this expression to be just a
# single identifier.
match = re.match(r"^UC_(\w+)$", expression)
if match:
name = match.group(1)
resolved_values["UC_" + name] = next_enum_value
next_enum_value += 1
else:
raise SyntaxError(
"Couldn't match any expression type to: %r" % expression
)
for name, raw_value in raw_matches.items():
# Convert any remaining values that are still unresolved. This usually only
# applies to #define macros that reference other constants.
if name not in resolved_values:
resolved_values[name] = eval(raw_value, resolved_values)
return resolved_values | 1681939a78efe6426cdea1577a8781a7f046c02d | 11,532 |
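# Usage sketch for parse_header_file (added for illustration): parsing a small
# made-up header. The `clean_line` helper the function relies on is not shown
# here and is assumed to return (line_without_comment, comment).
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".h", delete=False) as tmp:
    tmp.write("#define UC_API_MAJOR 2\n")
    tmp.write("enum uc_arch {\n")
    tmp.write("    UC_ARCH_ARM = 1,\n")
    tmp.write("    UC_ARCH_ARM64,\n")
    tmp.write("};\n")
    header_path = tmp.name

print(parse_header_file(header_path))
# -> OrderedDict([('UC_API_MAJOR', 2), ('UC_ARCH_ARM', 1), ('UC_ARCH_ARM64', 2)])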
import platform
def get_linux_distribution(get_full_name, supported_dists):
"""Abstract platform.linux_distribution() call which is deprecated as of
Python 3.5 and removed in Python 3.7"""
try:
supported = platform._supported_dists + (supported_dists,)
osinfo = list(
platform.linux_distribution(
full_distribution_name=get_full_name,
supported_dists=supported
)
)
if not osinfo or osinfo == ['', '', '']:
return get_linux_distribution_from_distro(get_full_name)
full_name = platform.linux_distribution()[0].strip()
osinfo.append(full_name)
except AttributeError:
return get_linux_distribution_from_distro(get_full_name)
return osinfo | 01ceea04eeb4e8130e9ce5899a116af557d9f954 | 11,533 |
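# Usage sketch for get_linux_distribution (added for illustration): on Python 3.8+
# platform.linux_distribution() is gone, so the call falls through to the
# distro-based fallback helper, which is not shown here.
print(get_linux_distribution(get_full_name=False, supported_dists="arch"))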
def check_disabled(func):
"""
Decorator to wrap up checking if the Backdrop
connection is set to disabled or not
"""
@wraps(func)
def _check(*args, **kwargs):
if _DISABLED:
return
else:
return func(*args, **kwargs)
return _check | 53d6b0b44558d09ed73556f6854f004c6767856c | 11,534 |
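# Usage sketch for check_disabled (added for illustration): `_DISABLED` is the
# module-level flag the decorator checks, and functools.wraps is assumed to be
# imported; the decorated writer below is hypothetical.
_DISABLED = False

@check_disabled
def write_to_backdrop(record):
    return "wrote %s" % record

print(write_to_backdrop("datapoint"))  # -> wrote datapoint (returns None when _DISABLED is True)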