content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---
import unittest
def _as_path_test(ctx):
"""Unit tests for maprule_testing.as_path."""
env = unittest.begin(ctx)
asserts.equals(
env,
"Foo\\Bar\\Baz\\Qux",
maprule_testing.cmd_strategy.as_path("Foo/Bar/Baz\\Qux"),
msg = "assertion #1",
)
asserts.equals(
env,
"Foo/Bar/Baz\\Qux",
maprule_testing.bash_strategy.as_path("Foo/Bar/Baz\\Qux"),
msg = "assertion #2",
)
return unittest.end(env)
|
a3a21effafe0a9209a43c3e917745890c2679e1a
| 30,298 |
def alpha_optimisation (problem, alpha_range=np.arange(0.0, 1.1, 0.1)):
"""
This method is used to optimise the alpha parameter.
Alpha parameter is used in the calculation of edges savings:
saving = distance_saving * (1 - alpha) + revenue * alpha
    The higher alpha is, the more weight is given to the revenue;
    the lower alpha is, the more weight is given to the distance saving.
We basically run 10 deterministic executions of the algorithm
(i.e., Mapper and then PJS) for 10 different levels of alpha.
The value of alpha that provides the best deterministic solution
is kept.
NOTE: This method also changes in place the savings of the edges.
    :param problem: The problem instance to solve.
:param alpha_range: The levels of alpha to test.
:return: The best value obtained for alpha.
"""
# Move useful references to the stack
dists, depot, sources, nodes = problem.dists, problem.depot, problem.sources, problem.nodes
# Run once the deterministic mapper
mapping = mapper(problem, iterator=greedy)
# Initialise the best alpha to zero
best_alpha, best_revenue = 0.0, float("-inf")
# We try different values of alpha parameter and we keep the best
for alphatest in alpha_range:
        # Try a new value of alpha
alphatest = round(alphatest, 1)
# Compute the edges savings according to the new alpha value
set_savings(problem, alphatest)
# Run a deterministic version of the PJS algorithm for each source.
routes = []
for source in problem.sources:
partial_routes = PJS_cache(problem, source, tuple(source.nodes), depot, alphatest)
routes.extend(partial_routes)
# Total obtained revenue (i.e., quality of the solution)
total_revenue = sum(r.revenue for r in routes)
# Eventually update the alpha
if total_revenue > best_revenue:
best_alpha, best_revenue = alphatest, total_revenue
# Set the savings of the edges by using the best found alpha
set_savings(problem, best_alpha)
# Return the best alpha obtained
return best_alpha
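# Illustration (not part of the original module): a tiny worked example of the
# edge-saving formula used above, with made-up distance-saving/revenue numbers.
dist_saving, revenue = 12.0, 30.0
for alpha in (0.0, 0.5, 1.0):
    saving = dist_saving * (1 - alpha) + revenue * alpha
    print(f"alpha={alpha:.1f} -> saving={saving:.1f}")
# alpha=0.0 weighs only the distance saving (12.0); alpha=1.0 weighs only the revenue (30.0).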
|
6c2e8450ac72b89661a48c1a14971f49afc37dfb
| 30,299 |
from typing import Dict
def _codify_quantitative_input_by_abs_val(
df: pd.DataFrame,
threshold: float,
p_value: float,
) -> Dict[str, int]:
"""Codify nodes with | logFC | if they pass threshold, otherwise score is 0."""
# Codify nodes with | logFC | if they pass threshold
df.loc[(df[LOG_FC]).abs() >= threshold, SCORE] = (df[LOG_FC]).abs()
# Codify nodes with score 0 if it falls below threshold
df.loc[(df[LOG_FC]).abs() < threshold, SCORE] = 0
# LogFC and adjusted p-values are provided in dataset
if P_VALUE in df.columns:
# Disregard entities if logFC adjusted p-value is not significant
return _remove_non_significant_entities(df, p_value)
return df.set_index(LABEL)[SCORE].to_dict()
|
0baaf3a58539f5be2a34d41e553b356e0b4df883
| 30,300 |
from typing import Optional
def labor_day(date: dt.date) -> Optional[str]:
"""First Monday in September"""
if not is_nth_day(date, 0, 0, 9):
return None
return "Happy Memorial Day. You can wear white again"
|
f03746c741ba60c18fa6d9254b1a8d80a7aa3437
| 30,301 |
def vgg16(reparametrized=False, **kwargs):
"""VGG 16-layer model (configuration "D")
    Args:
        reparametrized (bool): if True, build the layers of configuration "D"
            in their reparametrized form (passed through to make_layers)
"""
model = VGG(make_layers(cfg['D'], reparametrized=reparametrized), **kwargs)
return model
|
b1c7e4b98fdb25bce70e33865405232b25e17118
| 30,302 |
def logout(request):
"""
:param request:
:return:
"""
auth_logout(request)
return redirect('/')
|
d5d85ff49c36e81557bee83403046e5ec22f78ee
| 30,303 |
def vec3d_rand_corners(corner1, corner2):
""" Sample one R3 point from the AABB
defined by 'corner1' and 'corner2' """
span = np.subtract(corner2, corner1)
sample = vec_random(3)
return [corner1[0]+span[0]*sample[0],
corner1[1]+span[1]*sample[1],
corner1[2]+span[2]*sample[2]]
|
b8e87a869545476d8fce25bfd74c4d29138c93d8
| 30,304 |
def meta_body():
"""Ugoira page data."""
return '{"error":false,"message":"","body":{"src":"https:\/\/i.pximg.net\/img-zip-ugoira\/img\/2019\/04\/29\/16\/09\/38\/74442143_ugoira600x600.zip","originalSrc":"https:\/\/i.pximg.net\/img-zip-ugoira\/img\/2019\/04\/29\/16\/09\/38\/74442143_ugoira1920x1080.zip","mime_type":"image\/jpeg","frames":[{"file":"000000.jpg","delay":70},{"file":"000001.jpg","delay":70},{"file":"000002.jpg","delay":70},{"file":"000003.jpg","delay":70},{"file":"000004.jpg","delay":70},{"file":"000005.jpg","delay":70},{"file":"000006.jpg","delay":70},{"file":"000007.jpg","delay":70},{"file":"000008.jpg","delay":70},{"file":"000009.jpg","delay":70},{"file":"000010.jpg","delay":70},{"file":"000011.jpg","delay":70},{"file":"000012.jpg","delay":70},{"file":"000013.jpg","delay":70},{"file":"000014.jpg","delay":70},{"file":"000015.jpg","delay":70},{"file":"000016.jpg","delay":70},{"file":"000017.jpg","delay":70},{"file":"000018.jpg","delay":70},{"file":"000019.jpg","delay":70},{"file":"000020.jpg","delay":70},{"file":"000021.jpg","delay":70},{"file":"000022.jpg","delay":70},{"file":"000023.jpg","delay":70},{"file":"000024.jpg","delay":70},{"file":"000025.jpg","delay":70},{"file":"000026.jpg","delay":70},{"file":"000027.jpg","delay":70},{"file":"000028.jpg","delay":70},{"file":"000029.jpg","delay":70},{"file":"000030.jpg","delay":70},{"file":"000031.jpg","delay":70},{"file":"000032.jpg","delay":70},{"file":"000033.jpg","delay":70},{"file":"000034.jpg","delay":70},{"file":"000035.jpg","delay":70},{"file":"000036.jpg","delay":70},{"file":"000037.jpg","delay":70},{"file":"000038.jpg","delay":70},{"file":"000039.jpg","delay":70},{"file":"000040.jpg","delay":70},{"file":"000041.jpg","delay":70},{"file":"000042.jpg","delay":70},{"file":"000043.jpg","delay":70},{"file":"000044.jpg","delay":70},{"file":"000045.jpg","delay":70},{"file":"000046.jpg","delay":70},{"file":"000047.jpg","delay":70},{"file":"000048.jpg","delay":70},{"file":"000049.jpg","delay":70},{"file":"000050.jpg","delay":70},{"file":"000051.jpg","delay":70},{"file":"000052.jpg","delay":70},{"file":"000053.jpg","delay":70},{"file":"000054.jpg","delay":70},{"file":"000055.jpg","delay":70},{"file":"000056.jpg","delay":70},{"file":"000057.jpg","delay":70},{"file":"000058.jpg","delay":70},{"file":"000059.jpg","delay":70},{"file":"000060.jpg","delay":70},{"file":"000061.jpg","delay":70},{"file":"000062.jpg","delay":70},{"file":"000063.jpg","delay":70},{"file":"000064.jpg","delay":70},{"file":"000065.jpg","delay":70},{"file":"000066.jpg","delay":70},{"file":"000067.jpg","delay":70},{"file":"000068.jpg","delay":70},{"file":"000069.jpg","delay":70},{"file":"000070.jpg","delay":70},{"file":"000071.jpg","delay":70},{"file":"000072.jpg","delay":70},{"file":"000073.jpg","delay":70},{"file":"000074.jpg","delay":70},{"file":"000075.jpg","delay":70},{"file":"000076.jpg","delay":70}]}}'
|
abf9e01371938467b12721373a0e5fc8fb926016
| 30,307 |
def anatomical_traverse_bids(bids_layout,
modalities='anat',
subjects=None,
sessions=None,
extension=('nii', 'nii.gz', 'json'),
param_files_required=False,
**kwargs):
"""
Builds a convenient dictionary of usable anatomical subjects/sessions.
"""
meta_types = {'datatype' : modalities,
'extension' : extension,
'subjects' : subjects,
'sessions' : sessions}
meta_types.update(kwargs)
non_empty_types = {type_: values for type_, values in meta_types.items() if values}
# __FIELDS_TO_IGNORE__ = ('filename', 'modality', 'type')
# __TYPES__ = ['subjects', 'sessions',]
results = bids_layout.get(**non_empty_types)
if len(results) < 1:
print('No results found!')
        return None
all_subjects = bids_layout.get_subjects()
all_sessions = bids_layout.get_sessions()
if len(all_sessions) > 1:
sessions_exist = True
combinations = product(all_subjects, all_sessions)
else:
sessions_exist = False
combinations = all_subjects
reqd_exts_params = ('.json', )
named_exts_params = ('params', )
reqd_exts_images = ('.nii', '.gz')
# named_exts_images = ('image', 'image')
files_by_id = dict()
for sub in combinations:
if sessions_exist:
# sub is a tuple of subject,session
results = bids_layout.get(subject=sub[0], session=sub[1],
datatype='anat')
final_sub_id = '_'.join(sub)
else:
results = bids_layout.get(subject=sub, datatype='anat')
final_sub_id = sub
temp = {splitext(file.filename)[-1] : realpath(file.path)
for file in results}
param_files_exist = all([file_ext in temp for file_ext in reqd_exts_params])
image_files_exist = any([file_ext in temp for file_ext in reqd_exts_images])
if param_files_required and (not param_files_exist):
print('parameter files are required, but do not exist for {}'
' - skipping it.'.format(sub))
continue
if not image_files_exist:
print('Image file is required, but does not exist for {}'
' - skipping it.'.format(sub))
continue
files_by_id[final_sub_id] = dict()
# only when all the files required exist, do we include it for review
# adding parameter files, only if they exist
if param_files_exist:
files_by_id[final_sub_id] = {new_ext: temp[old_ext]
for old_ext, new_ext in
zip(reqd_exts_params, named_exts_params)}
else:
files_by_id[final_sub_id]['params'] = 'None'
# adding the image file
files_by_id[final_sub_id]['image'] = \
temp['.nii'] if '.nii' in temp else temp['.gz']
return files_by_id
|
cb48c4af0a4cf2969cbb291daf980d0556989e85
| 30,308 |
def get_email_subscriptions(email):
"""Verifies which email subsciptions exist for the provided email
Parameters
----------
email : str
        The email to check subscriptions for
Returns
-------
    dict
        {'queries': [(description, query type, query hash), ...],
         'models': [(model id, model name), ...]}
"""
user_queries = db.get_subscribed_queries(email)
user_models = db.get_user_models(email)
model_full_names = {}
for qo, mid, dh in user_queries:
if mid not in model_full_names:
config = load_config_from_s3(mid)
model_full_names[mid] = config.get('human_readable_name', mid)
for mid in user_models:
if mid not in model_full_names:
config = load_config_from_s3(mid)
model_full_names[mid] = config.get('human_readable_name', mid)
results = {
'queries': [(qo.to_english() + f' for model {model_full_names[mid]}',
f'{qo.get_type()}'.replace('_', ' '), qh)
for qo, mid, qh in user_queries],
'models': [(mid, model_full_names[mid]) for mid in user_models]
}
return results
|
84961b40512005a73b78d28feefcc424385bef8f
| 30,309 |
import re
def format_comments(text="default", line_size=90):
"""
Takes a string of text and formats it based on rule 1 (see docs).
"""
# rules to detect fancy comments, if not text
regex1 = r"^ *?####*$"
# rules to detect fancy comments, if text
regex2 = r"^ *?####*([^#\n\r]+)#*"
# if detected pattern 1, replace with this
subst1 = "#"*line_size
# if detected pattern 2, replace with this
def subst2(match_obj):
fix_pad = 4 + 2 # 4 hashes on left plus two spaces
cap_group = match_obj.group(1).strip()
return '#### ' + cap_group + ' ' + '#'*(line_size-fix_pad-len(cap_group))
text = re.sub(regex1, subst1, text, 0, re.MULTILINE)
text = re.sub(regex2, subst2, text, 0, re.MULTILINE)
# formatted text to return
return text
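# Illustration (not from the original module): a quick usage sketch with an
# invented sample; line_size=40 keeps the banners short enough to read here.
sample = "####\n####  Data loading  ####\nx = 1\n"
print(format_comments(sample, line_size=40))
# The bare '####' line becomes 40 '#' characters, and the titled banner is
# right-padded with '#' so the whole line is 40 characters wide.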
|
6eba4539aa7128d5654ddab7fe08a2e9df6dc738
| 30,310 |
def get_kernel_versions_async(loop=None):
"""
Execute dpkg commands asynchronously.
Args:
loop: asyncio event loop (optional)
Returns:
[DpkgCommandResult]: stats from the executed dpkg commands
"""
return subprocess_workflow.exec_and_parse_subprocesses_async(
[DpkgCommandParams()],
_get_dpkg_command_args_list,
parse_dpkg_output,
loop,
)
|
425eac2d2ef7e00512b04ed41f3269e094762557
| 30,311 |
import math
def autoencoder(
input_shape,
encoding_dim=512,
n_base_filters=16,
batchnorm=True,
batch_size=None,
name="autoencoder",
):
"""Instantiate Autoencoder Architecture.
Parameters
----------
input_shape: list or tuple of four ints, the shape of the input data. Should be
scaled to [0,1]. Omit the batch dimension, and include the number of channels.
        Currently, only squares and cubes are supported.
encoding_dim: int, the dimensions of the encoding of the input data. This would
translate to a latent code of dimensions encoding_dimx1.
    n_base_filters: int, number of base filters in the model's first convolutional
        layer. The subsequent layers use n_filters that are multiples of n_base_filters.
batchnorm: bool, whether to use batch normalization in the network.
batch_size: int, number of samples in each batch. This must be set when training on
TPUs.
name: str, name to give to the resulting model object.
Returns
-------
Model object.
"""
conv_kwds = {"kernel_size": 4, "activation": None, "padding": "same", "strides": 2}
conv_transpose_kwds = {
"kernel_size": 4,
"strides": 2,
"activation": None,
"padding": "same",
}
dimensions = input_shape[:-1]
n_dims = len(dimensions)
if not (n_dims in [2, 3] and dimensions[1:] == dimensions[:-1]):
raise ValueError("Dimensions should be of square or cube!")
Conv = getattr(layers, "Conv{}D".format(n_dims))
ConvTranspose = getattr(layers, "Conv{}DTranspose".format(n_dims))
n_layers = int(math.log(dimensions[0], 2))
# Input layer
inputs = x = layers.Input(shape=input_shape, batch_size=batch_size, name="inputs")
# Encoder
for i in range(n_layers):
n_filters = min(n_base_filters * (2 ** (i)), encoding_dim)
x = Conv(n_filters, **conv_kwds)(x)
if batchnorm:
x = layers.BatchNormalization()(x)
x = layers.ReLU()(x)
# Encoding of the input image
x = layers.Flatten(name="Encoding")(x)
# Decoder
x = layers.Reshape((1,) * n_dims + (encoding_dim,))(x)
for i in range(n_layers)[::-1]:
n_filters = min(n_base_filters * (2 ** (i)), encoding_dim)
x = ConvTranspose(n_filters, **conv_transpose_kwds)(x)
if batchnorm:
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU()(x)
# Output layer
outputs = Conv(1, 3, activation="sigmoid", padding="same")(x)
return models.Model(inputs=inputs, outputs=outputs)
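# Illustration (not from the original module): a minimal usage sketch, assuming
# `layers` and `models` are tensorflow.keras modules as the code above implies.
model = autoencoder(input_shape=(64, 64, 1), encoding_dim=128, n_base_filters=16)
model.summary()  # the encoder halves 64 -> 1 over log2(64) = 6 strided conv blocks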
|
dbb1983cb3b6adfcde823e6a2013e5517b57044f
| 30,312 |
import numpy
def SHAPER(B, D, LA):
"""
"""
LB = B.size
LD = D.size
A = numpy.zeros(LA)
LC = LB + LA - 1
LCD = LC + LD - 1
C = numpy.zeros(LCD)
INDEX = 0
ERRORS = numpy.zeros(LCD)
SPACE = numpy.zeros(3 * LA)
(A, LC, C, INDEX, ERRORS, S) = ER.SHAPER(LB, B, LD, D, LA, A, LC, C, INDEX, ERRORS, SPACE)
return (A, C, INDEX, ERRORS)
|
cbe86b69c073c36e0f5d97616c9de59f2b4c2652
| 30,313 |
from typing import Tuple

import numpy as np
import pandas as pd
from numpy import arange, dot, einsum, exp, log, multiply, zeros
def _get_efron_values_single(
X: pd.DataFrame,
T: pd.Series,
E: pd.Series,
weights: pd.Series,
entries: None,
beta: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, float]:
"""
Calculates the first and second order vector differentials, with respect to beta.
Note that X, T, E are assumed to be sorted on T!
    A good illustration of Efron's method: consider three of five subjects who fail
    at the same time. Since it is not known a priori which of them fails first,
    one-third of (φ1 + φ2 + φ3) is subtracted from sum_j^{5} φj after the first
    failure, two-thirds after the first two failures, and so on.
From https://cran.r-project.org/web/packages/survival/survival.pdf:
"Setting all weights to 2 for instance will give the same coefficient estimate but halve the variance. When
the Efron approximation for ties (default) is employed replication of the data will not give exactly the same coefficients as the
weights option, and in this case the weighted fit is arguably the correct one."
Parameters
----------
X: array
(n,d) numpy array of observations.
T: array
(n) numpy array representing observed durations.
E: array
(n) numpy array representing death events.
weights: array
(n) an array representing weights per observation.
beta: array
(1, d) numpy array of coefficients.
Returns
-------
hessian:
(d, d) numpy array,
gradient:
(1, d) numpy array
log_likelihood: float
"""
X = X.values
T = T.values
E = E.values
weights = weights.values
n, d = X.shape # n: samples; d: variables
hessian = zeros((d, d))
gradient = zeros((d,))
log_lik = 0
# Init risk and tie sums to zero
x_death_sum = zeros((d,))
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = zeros((d,)), zeros((d,))
risk_phi_x_x, tie_phi_x_x = zeros((d, d)), zeros((d, d))
# Init number of ties and weights
weight_count = 0.0
tied_death_counts = 0
scores = weights * exp(dot(X, beta))
phi_x_is = scores[:, None] * X
phi_x_x_i = np.empty((d, d))
# Iterate backwards to utilize recursive relationship
for i in range(n - 1, -1, -1): # i = n-1, n-2, n-3, ..., 3, 2, 1, 0
# Doing it like this to preserve shape
ti = T[i]
ei = E[i]
xi = X[i]
w = weights[i]
# Calculate phi values
phi_i = scores[i]
phi_x_i = phi_x_is[i]
# https://stackoverflow.com/a/51481295/1895939
phi_x_x_i = multiply.outer(xi, phi_x_i)
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i
risk_phi_x = risk_phi_x + phi_x_i
risk_phi_x_x = risk_phi_x_x + phi_x_x_i
# Calculate sums of Ties, if this is an event
if ei:
x_death_sum = x_death_sum + w * xi
tie_phi = tie_phi + phi_i
tie_phi_x = tie_phi_x + phi_x_i
tie_phi_x_x = tie_phi_x_x + phi_x_x_i
# Keep track of count
tied_death_counts += 1
weight_count += w
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tied_death_counts == 0:
# Only censored with current time, move on
continue
# There was at least one event and no more ties remain. Time to sum.
# This code is near identical to the _batch algorithm below. In fact, see _batch for comments.
weighted_average = weight_count / tied_death_counts
if tied_death_counts > 1:
increasing_proportion = arange(tied_death_counts) / tied_death_counts
denom = 1.0 / (risk_phi - increasing_proportion * tie_phi)
numer = risk_phi_x - multiply.outer(increasing_proportion, tie_phi_x)
a1 = einsum("ab,i->ab", risk_phi_x_x, denom) - einsum("ab,i->ab", tie_phi_x_x, increasing_proportion * denom)
else:
denom = 1.0 / np.array([risk_phi])
numer = risk_phi_x
a1 = risk_phi_x_x * denom
summand = numer * denom[:, None]
a2 = summand.T.dot(summand)
gradient = gradient + x_death_sum - weighted_average * summand.sum(0)
log_lik = log_lik + dot(x_death_sum, beta) + weighted_average * log(denom).sum()
hessian = hessian + weighted_average * (a2 - a1)
# reset tie values
tied_death_counts = 0
weight_count = 0.0
x_death_sum = zeros((d,))
tie_phi = 0
tie_phi_x = zeros((d,))
tie_phi_x_x = zeros((d, d))
return hessian, gradient, log_lik
|
2d6a049e6894f3be6e002d22cc1c2b7d4705a66f
| 30,314 |
import re
def get_battery_information():
"""Return device's battery level."""
output = adb.run_adb_shell_command(['dumpsys', 'battery'])
# Get battery level.
m_battery_level = re.match(r'.*level: (\d+).*', output, re.DOTALL)
if not m_battery_level:
logs.log_error('Error occurred while getting battery status.')
return None
# Get battery temperature.
m_battery_temperature = re.match(r'.*temperature: (\d+).*', output, re.DOTALL)
if not m_battery_temperature:
logs.log_error('Error occurred while getting battery temperature.')
return None
level = int(m_battery_level.group(1))
temperature = float(m_battery_temperature.group(1)) / 10.0
return {'level': level, 'temperature': temperature}
|
dba773386e88728b3a1cf752c6b3bfa74b38963d
| 30,317 |
def _tensor_setitem_by_tuple_with_tuple(data, tuple_index, value):
"""
Tensor assignment.
Note:
Syntax support: A[B, C, D] = U.
Restraint condition: 1) A is a Tensor, and B, C, D are index Tensors.
                             2) B, C and D can be broadcast against each other.
3) U is a Tensor.
Inputs:
data (Tensor): Assigned tensor.
index (Tuple): A tuple of tensor, these tensor could be broadcast.
        value (Tensor): Assignment tensor, should have the same data type as 'data'.
Outputs:
Tensor, element type and shape is same as data.
"""
indexes_types = compile_utils.hyper_map(F.typeof, tuple_index)
index_elements_type = const_utils.tuple_index_elements_type(indexes_types, const_utils.TENSOR_SETITEM)
if index_elements_type == const_utils.ALL_TENSOR:
indices = compile_utils.generate_indices_from_tuple_of_tensor(data,
tuple_index,
const_utils.TENSOR_SETITEM)
else:
indices = compile_utils.generate_indices_from_tuple_of_mixed_tensors(data,
tuple_index,
const_utils.TENSOR_SETITEM)
updates = compile_utils.generate_updates_from_tuple(data,
indices,
value,
const_utils.SET_ITEM_BY_TUPLE_OF_TENSOR)
return F.scatter_nd_update(data, indices, updates)
|
d00d08cb1391c96938bf5390c5e7f58bac6724a5
| 30,318 |
def templates_global_context(request):
"""
Return context for use in all templates.
"""
global_context = {
'constant_ddd': constants.DDD,
'constant_estado': constants.ESTADO,
'constant_municipio': constants.MUNICIPIO,
'constant_cep': constants.CEP,
'constant_pais': constants.PAIS,
'constant_current_year': constants.CURRENT_YEAR,
}
return global_context
|
500ce9eaf26631fdeaa48c4d9001847e713262f5
| 30,319 |
import warnings

from tensorflow.keras.layers import Add, Concatenate, Input
from tensorflow.keras.models import Model
from tensorflow.python.lib.io import file_io
def doubleunet(num_classes,
input_shape=(224, 224, 3),
model_weights=None,
num_blocks=5,
encoder_one_type='Default',
encoder_one_weights=None,
encoder_one_freeze=False,
encoder_one_filters=[32, 64, 128, 256, 512],
dspp_one_filters=256,
decoder_one_type='upsampling',
num_decoder_one_block_conv_layers=1,
decoder_one_filters=[512, 256, 128, 64, 32],
decoder_one_activation=None,
decoder_one_use_skip_connection=True,
decoder_one_use_batchnorm=True,
decoder_one_dropout_rate=0,
output_one_activation=None,
encoder_two_type='Default',
encoder_two_weights=None,
encoder_two_freeze=False,
encoder_two_filters=[32, 64, 128, 256, 512],
dspp_two_filters=256,
decoder_two_type='upsampling',
decoder_two_filters=[512, 256, 128, 64, 32],
num_decoder_two_block_conv_layers=1,
decoder_two_activation=None,
decoder_two_use_skip_connection=True,
decoder_two_use_batchnorm=True,
decoder_two_dropout_rate=0,
output_two_activation=None):
"""
Merge the doubleunet_encoder and doubleunet_decoder functions to instantiate
    the doubleunet architecture for semantic segmentation tasks.
Args:
num_classes: number of the segmentation classes.
input_shape: a tuple containing image height, width and channels
respectively. Default to (224,224,3).
model_weights: (optional) link to pre-trained weights.
num_blocks: (optional) number of encoder and decoder blocks.
Default to 5.
############################ Encoder Blocks ########################
encoder_one_type & encoder_two_type:
type of model to build upon. One of 'Default',
'DenseNet121', 'DenseNet169' 'EfficientNetB0',
'EfficientNetB1', 'EfficientNetB2', 'EfficientNetB3',
'EfficientNetB4', 'EfficientNetB5', 'EfficientNetB6',
'EfficientNetB7', 'MobileNet', 'MobileNetV2',
'ResNet50', 'ResNet101', 'ResNet152',
'ResNet50V2', 'ResNet101V2', 'ResNet152V2',
'VGG16', 'VGG19'. Default encoder type is 'Default'.
encoder_one_weights & encoder_two_weights:
(optional) pre-trained weights for encoder function.
One of None (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded
encoder_one_freeze & encoder_two_freeze:
(optional) boolean to specify whether to train
encoder model parameters or not. Default is False.
encoder_one_filters & encoder_two_filters:
(optional) a list containing number of filters to use
for each encoder convolution blocks.
Default to [32, 64, 128, 256, 512].
############################ DSPP Blocks ###########################
dspp_one_filters & dspp_two_filters:
(optional) a list containing number of filters to use
for each DSSP block. Default to 256.
############################# Decoder Blocks #######################
decoder_one_type & decoder_two_type:
(optional) one of 'transpose' (to use Conv2DTanspose
operation for upsampling operation) or 'upsampling' (to
use UpSampling2D operation for upsampling operation).
Default to upsampling.
decoder_one_filters & decoder_two_filters:
(optional) a list containing number of filters to use
for each decoder convolution blocks.
Default to [512, 256, 128, 64, 32].
num_decoder_one_blocks & num_decoder_two_blocks:
(optional) number of decoder blocks to use. Default to 5.
decoder_one_filters & decoder_two_filters:
(optional) a list containing filter sizes for each
decoder block. Default to [32, 64, 128, 256, 512].
num_decoder_one_block_conv_layers & num_decoder_two_block_conv_layers:
(optional) number of convolution layers for each decoder
block (i.e. number of Conv2D layers after upsampling
layers). Default is 1.
decoder_one_activation & decoder_two_activation:
(optional) decoder activation name or function.
decoder_one_use_skip_connection & decoder_two_use_skip_connection:
(optional) one of True (to use residual/skip connections)
or False (not to use residual/skip connections).
Default to True.
decoder_use_batchnorm:
(optional) boolean to specify whether decoder layers
should use BatchNormalization or not.
                        Defaults to True.
decoder_dropout_rate:
(optional) dropout rate. Float between 0 and 1.
output_activation:
(optional) activation for output layer.
Default is either 'sigmoid' or 'softmax' based on
the value of the 'num_classes' argument.
Returns:
model: keras double-unet segmentation model
"""
#--------------------------------------------------------------------------#
# Validate and preprocess arguments
#--------------------------------------------------------------------------#
# 1. num_classes - check doubleunet_decoder functon
# 2. encoder_type - check doubleunet_encoder functon
# 3. input_shape - check doubleunet_encoder functon
# 2. input_shape
if not isinstance(input_shape, tuple):
raise ValueError("The `input_shape` argument should a tuple containing "
"the image width, height and channels respectively.")
if not len(input_shape) == 3:
warnings.warn("The `input_shape` argument should be a tuple containing "
"three integer values for each of the image width, "
"height, and channels respectively.")
# 4. model_weights
if not (model_weights in {None} or file_io.file_exists_v2(model_weights)):
warnings.warn('The `model_weights` argument should either be '
'`None` (random initialization), '
'or the path to the weights file to be loaded.')
# 5. encoder_weights - check doubleunet_encoder functon
# 6. encoder_freeze - check doubleunet_encoder functon
# 7. num_bottleneck_conv_layers - check doubleunet_bottleneck functon
# 8. num_bottleneck_conv_filters - check doubleunet_bottleneck functon
# 9. bottleneck_use_batchnorm - check doubleunet_bottleneck functon
# 10. num_decoder_blocks - check doubleunet_decoder functon
# 11. decoder_type - check doubleunet_decoder functon
# 12. decoder_filters - check doubleunet_decoder functon
# 13. num_decoder_block_conv_layers - check doubleunet_decoder functon
# 14. decoder_activation - check doubleunet_decoder functon
# 15. decoder_use_skip_connection - check doubleunet_decoder functon
# 16. decoder_use_batchnorm - check doubleunet_decoder functon
# 17. decoder_dropout_rate - check doubleunet_decoder functon
# 18. output_activation - check doubleunet_decoder functon
#--------------------------------------------------------------------------#
# Build Model
#--------------------------------------------------------------------------#
# Network 1
#--------------------------------------------------------------------------#
# 1. Get the encoder model, model output layer and skip connection layers
input_1 = Input(shape=(input_shape), name='input_1')
encoder_model_1, encoder_model_output_1, skip_connection_layers_1 = encoder(
encoder_type=encoder_one_type,
input_tensor=input_1,
encoder_weights=encoder_one_weights,
encoder_freeze=encoder_one_freeze,
num_blocks=num_blocks,
encoder_filters=encoder_one_filters
)
# 2. Get the ASPP/DSPP block output layer
dspp_output_1 = DilatedSpatialPyramidPooling(
dspp_input=encoder_model_output_1,
num_filters=dspp_one_filters
)
# 3. Decoder blocks
# Extend the model by adding the decoder blocks
output_1 = decoder(
num_classes=num_classes,
decoder_input=dspp_output_1,
skip_connection_layers_1 = skip_connection_layers_1,
skip_connection_layers_2= None,
decoder_type=decoder_one_type,
num_blocks=num_blocks,
decoder_filters=decoder_one_filters,
num_decoder_block_conv_layers=num_decoder_one_block_conv_layers,
decoder_activation=decoder_one_activation,
decoder_use_skip_connection=decoder_one_use_skip_connection,
decoder_use_batchnorm=decoder_one_use_batchnorm,
decoder_dropout_rate=decoder_one_dropout_rate,
output_activation=output_one_activation)
    # Rename encoder model one's layers so that no layer from encoders one
    # and two ends up with the same name.
enc_1_layers = [layer for layer in
Model(encoder_model_1.inputs, output_1).layers]
for layer in enc_1_layers:
layer._name = layer._name + str("_a")
#--------------------------------------------------------------------------#
# Network 2
#--------------------------------------------------------------------------#
input_2 = Concatenate(axis=-1, name='input_2')([output_1, input_1])
# 1. Get the encoder model, model output layer and skip connection layers
encoder_model_2, encoder_model_output_2, skip_connection_layers_2 = encoder(
encoder_type=encoder_two_type,
input_tensor=input_2,
encoder_weights=encoder_two_weights,
encoder_freeze=encoder_two_freeze,
num_blocks=num_blocks,
encoder_filters=encoder_two_filters
)
# 2. Get the ASPP/DSPP block output layer
dspp_output_2 = DilatedSpatialPyramidPooling(
dspp_input=encoder_model_output_2,
num_filters=dspp_two_filters
)
# 3. Decoder blocks
# Extend the model by adding the decoder blocks
output_2 = decoder(
num_classes=num_classes,
decoder_input=dspp_output_2,
skip_connection_layers_1 = skip_connection_layers_1,
skip_connection_layers_2 = skip_connection_layers_2,
decoder_type=decoder_two_type,
num_blocks=num_blocks,
decoder_filters=decoder_two_filters,
num_decoder_block_conv_layers=num_decoder_two_block_conv_layers,
decoder_activation=decoder_two_activation,
decoder_use_skip_connection=decoder_two_use_skip_connection,
decoder_use_batchnorm=decoder_two_use_batchnorm,
decoder_dropout_rate=decoder_two_dropout_rate,
output_activation=output_two_activation
)
    # Rename encoder model two's layers in case encoders one and two share layer names
enc_1_layers = [layer for layer in
Model(encoder_model_1.inputs, output_1).layers]
enc_2_layers = [layer for layer in
Model(encoder_model_2.inputs, output_2).layers
if layer not in enc_1_layers]
for layer in enc_2_layers:
layer._name = layer._name + str("_b")
outputs = Add()([output_1, output_2])
inputs = encoder_model_1.inputs
## Image Segmentation Model
model = Model(inputs, outputs)
return model
|
cf50030dfe2ace708b7ee192aa7e4631af2a5e2c
| 30,320 |
def get_lldp_neighbors(dut, interface=None):
"""
Get LLDP Neighbours Info
Author: Prudvi Mangadu ([email protected])
:param dut:
:param interface: localport
:return:
"""
command = "show lldp neighbors"
if interface:
command = "show lldp neighbors {}".format(interface)
return st.show(dut, command)
|
2fadb9f1c61a3b289b8d66e3e3cb0566c336bd03
| 30,322 |
def encode_onehot(batch_inputs, max_len=None):
"""One-hot encode a string input."""
if max_len is None:
max_len = get_max_input_len()
def encode_str(s):
tokens = CTABLE.encode(s)
unpadded_len = len(tokens)
if unpadded_len > max_len:
            raise ValueError(f'Sequence too long ({unpadded_len} > {max_len}): \'{s}\'')
tokens = np.pad(tokens, [(0, max_len-len(tokens))], mode='constant')
return jax.nn.one_hot(tokens, CTABLE.vocab_size, dtype=jnp.float32)
return np.array([encode_str(inp) for inp in batch_inputs])
|
2cecbfd553cde1184720c3b0a5c666f5762b174d
| 30,323 |
def plot_confusion_matrix(cm,
normalize=True,
title=None,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
plt.show() must be run to view the plot, this is not done
by this function.
Parameters
----------
cm : np.ndarray
the confusion matrix to plot
normalize : bool, optional
whether to normalise the matrix, by default True
title : string, optional
        title of the plot; otherwise a sensible title is generated. By default None
cmap : matplotlib.colormap, optional
matplotlib colormap, by default plt.cm.Blues
Returns
-------
matplotlib.Axes
axes object of the plot generated
"""
if not title:
title = 'Normalized confusion matrix' if normalize else 'Confusion matrix, without normalization'
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
fig, ax = plt.subplots(figsize=(4,2))
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=CLASSES, yticklabels=CLASSES,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
return ax
|
2dc9f5917d97278844c90eb851a45bc246f22c7c
| 30,324 |
def detect_id_type(sid):
"""Method that tries to infer the type of abstract ID.
Parameters
----------
sid : str
The ID of an abstract on Scopus.
Raises
------
ValueError
If the ID type cannot be inferred.
Notes
-----
PII usually has 17 chars, but in Scopus there are valid cases with only
16 for old converted articles.
Scopus ID contains only digits, but it can have leading zeros. If ID
with leading zeros is treated as a number, SyntaxError can occur, or the
ID will be rendered invalid and the type will be misinterpreted.
"""
sid = str(sid)
try:
isnumeric = sid.isnumeric()
except AttributeError: # Python2
isnumeric = unicode(sid, 'utf-8').isnumeric()
if not isnumeric:
if sid.startswith('2-s2.0-'):
id_type = 'eid'
elif '/' in sid or "." in sid:
id_type = 'doi'
elif 16 <= len(sid) <= 17:
id_type = 'pii'
elif isnumeric:
if len(sid) < 10:
id_type = 'pubmed_id'
else:
id_type = 'scopus_id'
try:
return id_type
except UnboundLocalError:
raise ValueError('ID type detection failed for \'{}\'.'.format(sid))
|
b9c6f1442f6824e990ac1275296bb50fdad682cd
| 30,326 |
def config_to_dict(plato_config):
""" Convert the plato config (can be nested one) instance to the dict. """
# convert the whole to dict - OrderedDict
plato_config_dict = plato_config._asdict()
def to_dict(elem):
for key, value in elem.items():
try:
value = value._asdict()
elem[key] = to_dict(value)
            except AttributeError:
pass
if isinstance(value, list):
for idx, value_item in enumerate(value):
try:
value_item = value_item._asdict()
value[idx] = to_dict(value_item)
                except AttributeError:
pass
elem[key] = value
return elem
plato_config_dict = to_dict(plato_config_dict)
return plato_config_dict
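# Illustration (not from the original module): exercising config_to_dict with
# nested namedtuples standing in for a Plato config; the field names are invented.
from collections import namedtuple

Trainer = namedtuple("Trainer", ["epochs", "lr"])
Config = namedtuple("Config", ["trainer", "clients"])

cfg = Config(trainer=Trainer(epochs=5, lr=0.01), clients=[Trainer(epochs=1, lr=0.1)])
print(config_to_dict(cfg))
# Nested namedtuples come back as plain dicts, e.g.
# {'trainer': {'epochs': 5, 'lr': 0.01}, 'clients': [{'epochs': 1, 'lr': 0.1}]}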
|
9e68c2859dc33370554f8015f96bd501f827c1b2
| 30,327 |
def analyze_single_user_info(result=load_data()):
"""
    :param result: query rows of (user_id, user_name, food_code, food_name, meal_type, eat_time)
    :return:
    example: {user_id: 1, meal_info: {breakfast: {food_name: <dish name>, times: <count>}}, {early_dinner: {...}}, {supper: {...}}}
"""
result = pd.DataFrame(result, columns=['user_id', 'user_name', 'food_code', 'food_name', 'meal_type', 'eat_time'])
user_id = set(result['user_id'])
for i in user_id:
user_info = result[result['user_id'] == i]
breakfast_info = user_info[user_info['meal_type'] == MEAL_TYPE[0]]
early_dinner_info = user_info[user_info['meal_type'] == MEAL_TYPE[1]]
        dinner_info = user_info[user_info['meal_type'] == MEAL_TYPE[2]]
def analyze_meal_info(meal_info):
"""
:param meal_info:
:return:
"""
food_name = set(meal_info['food_name'])
result_set = []
for name in food_name:
tmp = {'food_name': name, 'times': len(meal_info[meal_info['food_name'] == name])}
result_set.append(tmp)
return result_set
meal_type = {'breakfast': analyze_meal_info(breakfast_info),
'early_dinner': analyze_meal_info(early_dinner_info),
'supper': analyze_meal_info(dinner_info)}
user_dict = {'user_id': i, 'meal_info': meal_type}
return user_dict
|
04b8084efce6e5f5707f61d114cdc3a98037c1c1
| 30,328 |
def mergeSort(data):
""" Implementation of the merge sort algorithm in ascending order """
n = len(data)
    if n <= 1:
return data
else:
        midIndex = n // 2
leftHalf = mergeSort(data[0:midIndex])
rightHalf = mergeSort(data[midIndex:n])
return mergeHalves(leftHalf, rightHalf)
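# Illustration: mergeSort above relies on a mergeHalves helper that is not shown
# here. A minimal sketch of what that helper is assumed to do (merge two already
# sorted lists into one sorted list) follows.
def mergeHalves(left, right):
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # One half is exhausted; append the remainder of the other.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged

# mergeSort([5, 2, 4, 7, 1, 3, 2, 6]) -> [1, 2, 2, 3, 4, 5, 6, 7]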
|
68e693fdcaaf0127372ad3477df64473e989a2e2
| 30,329 |
def compute_gradient_logistic(y, tx, w):
"""Function to compute gradient of loss of logistic regression for given w.
Args:
y (numpy array): Matrix output of size N x 1.
tx (numpy array): Matrix input of size N x D.
w (numpy array): Matrix weight (parameters of the model) of size D x 1.
Returns:
gradient (numpy array) : Matrix Gradient of size D x 1.
"""
y_hat = sigmoid(tx.dot(w))
gradient = (np.transpose(tx)).dot(y_hat - y)
return gradient
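# Illustration (not from the original module): a tiny numeric check of the gradient
# formula. A local sigmoid stands in for the module-level one assumed above.
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

tx = np.array([[1.0, 2.0], [1.0, -1.0], [1.0, 0.5], [1.0, 3.0]])  # N=4, D=2
y = np.array([[1.0], [0.0], [1.0], [1.0]])
w = np.zeros((2, 1))
print(compute_gradient_logistic(y, tx, w))
# With w = 0, sigmoid(tx @ w) is 0.5 everywhere, so the gradient equals tx.T @ (0.5 - y).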
|
db525602a5d64dda8e64592770210315da29e64f
| 30,330 |
import re
def parse_py(fname):
"""Look for links in a .py file."""
with open(fname) as f:
lines = f.readlines()
urls = set()
for i, line in enumerate(lines):
for url in find_urls(line):
# comment block
if line.lstrip().startswith('# '):
subidx = i + 1
                while subidx < len(lines):
nextline = lines[subidx].strip()
if re.match('^# .+', nextline):
url += nextline[1:].strip()
else:
break
subidx += 1
urls.add(url)
return list(urls)
|
c95f6f326a74bfc3e123df4ac09171e0a44d4486
| 30,331 |
def lwp_cookie_str(cookie):
"""Return string representation of Cookie in an the LWP cookie file format.
Actually, the format is extended a bit -- see module docstring.
"""
h = [(cookie.name, cookie.value),
("path", cookie.path),
("domain", cookie.domain)]
if cookie.port is not None: h.append(("port", cookie.port))
if cookie.path_specified: h.append(("path_spec", None))
if cookie.port_specified: h.append(("port_spec", None))
if cookie.domain_initial_dot: h.append(("domain_dot", None))
if cookie.secure: h.append(("secure", None))
if cookie.expires: h.append(("expires",
time2isoz(float(cookie.expires))))
if cookie.discard: h.append(("discard", None))
if cookie.comment: h.append(("comment", cookie.comment))
if cookie.comment_url: h.append(("commenturl", cookie.comment_url))
if cookie.rfc2109: h.append(("rfc2109", None))
keys = cookie.nonstandard_attr_keys()
keys.sort()
for k in keys:
h.append((k, str(cookie.get_nonstandard_attr(k))))
h.append(("version", str(cookie.version)))
return join_header_words([h])
|
5d7735397fdb23e629ed4db844cbbd44bc386674
| 30,332 |
def fiscalyear():
"""Retrieve Fiscal Years and display for selection by user."""
cascs = db.session.query(casc).order_by(casc.name).all()
cascs_and_fys = {}
class F(FyForm):
pass
list_fy = []
for curr_casc in cascs:
cascs_and_fys[curr_casc.name] = {}
cascs_and_fys[curr_casc.name]["id"] = curr_casc.id
fys = db.session.query(FiscalYear).order_by(
FiscalYear.name).filter(
FiscalYear.casc_id == curr_casc.id).all()
cascs_and_fys[curr_casc.name]["fiscal_years"] = []
for fy in fys:
fiscal_year = {}
list_fy.append("fy" + str(fy.id))
fiscal_year["id"] = fy.id
fiscal_year["name"] = fy.name
cascs_and_fys[curr_casc.name]["fiscal_years"].append(fiscal_year)
setattr(F, "fy" + str(fy.id), BooleanField(fy.name))
form = F()
if form.validate_on_submit():
id_list = []
projects = []
for fy in list_fy:
fy_attr = getattr(form, fy)
selected = fy_attr.data
if selected:
id_list.append(fy.replace("fy", ""))
print('length of id_list:', len(id_list))
for i in id_list:
fy_model = db.session.query(FiscalYear).get(i)
for proj in fy_model.projects:
project_dict = {}
project_dict['fy_id'] = i
project_dict['casc_id'] = fy_model.casc_id
project_dict['proj_id'] = proj.id
projects.append(project_dict)
session["projects"] = projects
print('length of projects:', len(projects))
return redirect(url_for('main.report'))
elif request.method == 'GET':
pass
return render_template('fiscalYears.html',
form=form,
cascs_and_fys=cascs_and_fys,
title="Select Fiscal Years"), 400
|
9656aafc00083097417eae8b6633ea99ac9bb9e4
| 30,333 |
def forbidden(error) -> str:
""" Forbidden resource
"""
return jsonify({"error": error.description}), 403
|
6c9fb0c1ad696b9337a2345a82613f2359a00778
| 30,334 |
def get_parameter(model, name):
"""
Finds the named parameter within the given model.
"""
for n, p in model.named_parameters():
if n == name:
return p
raise LookupError(name)
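# Illustration (not from the original module): a brief usage sketch assuming a
# PyTorch model; 'weight' and 'bias' are the parameter names nn.Linear registers.
import torch.nn as nn

model = nn.Linear(4, 2)
print(get_parameter(model, "weight").shape)  # torch.Size([2, 4])
# get_parameter(model, "missing") would raise LookupError: 'missing'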
|
ba35b743d9189c94da0dcce27630bba311ea8a46
| 30,335 |
def _update_method(oldmeth, newmeth):
"""Update a method object."""
# XXX What if im_func is not a function?
_update(oldmeth.im_func, newmeth.im_func)
return oldmeth
|
1c05204067610acb4f540839e647466f07952323
| 30,336 |
def get_stats_asmmemmgr(space):
"""Returns the raw memory currently used by the JIT backend,
as a pair (total_memory_allocated, memory_in_use)."""
m1 = jit_hooks.stats_asmmemmgr_allocated(None)
m2 = jit_hooks.stats_asmmemmgr_used(None)
return space.newtuple([space.newint(m1), space.newint(m2)])
|
16aa01635d08ea39ab9051c15e60b11c3bc027a5
| 30,338 |
def parse_function(filename):
""" Parse a filename and load the corresponding image. Used for faces.
Parameters
----------
filename : str
Path to the faces image.
Returns
-------
image : tensorflow.Tensor
Image object.
Raises
------
None
Notes
-----
None
"""
image_string = tf.io.read_file(filename)
#Don't use tf.image.decode_image, or the output shape will be undefined
image = tf.image.decode_jpeg(image_string, channels=3)
#This will convert to float values in [0, 1], so do not use it here.
# we normalize later.
#image = tf.image.convert_image_dtype(image, tf.float32)
return image
|
878d00c1f9c7dc37041e79a96e030b249e2ca350
| 30,339 |
def calc_final_speed(v_i, a, d):
"""
Computes the final speed given an initial speed, distance travelled,
and a constant acceleration.
:param:
v_i: initial speed (m/s)
a: acceleration (m/s^2)
d: distance to be travelled (m)
:return:
v_f: the final speed (m/s)
"""
discr = 2 * d * a + v_i ** 2
if discr > 0:
v_f = np.sqrt(discr)
else:
v_f = 0
return v_f
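# Illustration (numbers are made up): the function implements v_f = sqrt(v_i^2 + 2*a*d),
# clamped to 0 when the discriminant is not positive.
import numpy as np  # calc_final_speed uses np.sqrt

print(calc_final_speed(10.0, 1.5, 100.0))  # sqrt(10^2 + 2*1.5*100) = 20.0 m/s
print(calc_final_speed(10.0, -2.0, 50.0))  # discriminant = -100 < 0, so 0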
|
14dbf3f6e7391b0fd0c1796f77c5966875b689b8
| 30,340 |
def write_table(fh, data, samples=None, tree=None, rankdic=None, namedic=None,
name_as_id=False):
"""Write a profile to a tab-delimited file.
Parameters
----------
fh : file handle
Output file.
data : dict
Profile data.
samples : list, optional
Ordered sample ID list.
tree : dict, optional
Taxonomic tree, to inform "Lineage" column.
rankdic : dict, optional
Rank dictionary, to inform "Rank" column.
namedic : dict, optional
Taxon name dictionary, to inform "Name" column.
name_as_id : bool, optional
Replace feature IDs with names. It applies to row headers and "Lineage"
column, and removes "Name" column.
Returns
-------
int
Number of samples in the table.
int
Number of features in the table.
Notes
-----
The output table will have columns as samples and rows as features.
Optionally, three metadata columns, "Name", "Rank" and "Lineage" will be
appended to the right of the table.
"""
if samples:
samples = [x for x in samples if x in data]
else:
samples = sorted(data)
# table header
header = ['#FeatureID'] + samples
if namedic and not name_as_id:
header.append('Name')
if rankdic:
header.append('Rank')
if tree:
header.append('Lineage')
print('\t'.join(header), file=fh)
# table body
nrow = 0
for key in sorted(allkeys(data)):
# stratification
stratum, feature = key if isinstance(key, tuple) else (None, key)
# get feature name
name = namedic[feature] if namedic and feature in namedic else None
# fill row header (feature Id or name)
head = name if name_as_id and name else feature
row = [f'{stratum}|{head}'] if stratum else [head]
# fill cell values (feature counts)
for sample in samples:
row.append(str(data[sample][key]) if key in data[sample] else '0')
# fill name column
if namedic and not name_as_id:
row.append(name or '')
# fill rank column
if rankdic:
row.append(rankdic[feature] if feature in rankdic else '')
# fill lineage column
if tree:
row.append(get_lineage_gg(
feature, tree, namedic if name_as_id else None))
# print row
print('\t'.join(row), file=fh)
nrow += 1
return len(samples), nrow
|
40698102a0a000e3ec2ba7fff6cff35e6cf2b598
| 30,342 |
def data_context_notification_context_notif_subscriptionuuid_notificationnotification_uuid_changed_attributesvalue_name_get(uuid, notification_uuid, value_name): # noqa: E501
"""data_context_notification_context_notif_subscriptionuuid_notificationnotification_uuid_changed_attributesvalue_name_get
returns tapi.notification.NameAndValueChange # noqa: E501
:param uuid: Id of notif-subscription
:type uuid: str
:param notification_uuid: Id of notification
:type notification_uuid: str
:param value_name: Id of changed-attributes
:type value_name: str
:rtype: TapiNotificationNameAndValueChange
"""
return 'do some magic!'
|
7171e1dab60d838d0a321e1d339b07511674a4f6
| 30,343 |
def angular_misalignment_loss_db(n, w, theta, lambda0):
"""
Calculate the loss due to angular fiber misalignment.
See Ghatak eqn 8.75
Args:
n: index between fiber ends [-]
w: mode field radius [m]
theta: angular misalignment [radians]
lambda0: wavelength in vacuum [m]
Returns:
angular misalignment loss in dB [-]
"""
return 4.34 * (np.pi * w * theta * n / lambda0)**2
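# Illustration with assumed single-mode-fibre numbers (index-matched joint n ~ 1.45,
# mode field radius 5.2 um, 0.5 degree tilt, 1550 nm); values are examples, not data.
import numpy as np

n, w, lambda0 = 1.45, 5.2e-6, 1550e-9
theta = np.radians(0.5)
print(angular_misalignment_loss_db(n, w, theta, lambda0))  # roughly 0.08 dB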
|
4233dad15b3840dda95a762d32eec657a423d28d
| 30,344 |
def replace_number(token):
"""Replaces a number and returns a list of one or multiple tokens."""
if number_match_re.match(token):
return number_split_re.sub(r' @\1@ ', token)
return token
|
c5954c447142581efd80aedf0215e66240ef89ae
| 30,345 |
import timeit
from typing import DefaultDict
def dnscl_rpz(
ip_address: str,
filename: str = FILENAME,
tail_num: int = 0,
quiet_mode: bool = False,
) -> int:
"""Return rpz names queried by a client IP address."""
start_time = timeit.default_timer()
rpz_dict: DefaultDict = defaultdict(int)
line_count = 0
ip_address_search = ip_address + "#"
if tail_num:
syslog = tail(filename, tail_num)
else:
syslog = tail(filename)
for line in syslog:
line = line.decode("utf-8")
if ip_address_search in line:
if "QNAME" in line and "SOA" not in line:
fields = line.strip().split(" ")
rpz_domain_fields = find_rpz_domain_field(fields).split("/")
rpz_domain = rpz_domain_fields[0]
if len(fields) > 11:
rpz_dict[rpz_domain] += 1
line_count += 1
rpz_list_sorted = sort_dict(rpz_dict)
elapsed_time = timeit.default_timer() - start_time
print(f"{ip_address} total queries: {line_count}")
print("queries: ")
for domain_name, query_count in rpz_list_sorted:
print(query_count, "\t", domain_name)
if not quiet_mode:
print(
f"\nSummary: Searched {ip_address} and found {line_count}",
f"queries for {len(rpz_dict)} rpz names.",
)
print(f"Query time: {round(elapsed_time, 2)} seconds")
return line_count
|
951d40f56a7b12a454499524da36e39b1f91b2bd
| 30,346 |
def sweep_centroids(nrays, rscale, nbins, elangle):
"""Construct sweep centroids native coordinates.
Parameters
----------
nrays : int
number of rays
rscale : float
length [m] of a range bin
nbins : int
number of range bins
elangle : float
elevation angle [radians]
Returns
-------
coordinates : 3d array
array of shape (nrays,nbins,3) containing native centroid radar
coordinates (slant range, azimuth, elevation)
"""
ascale = 2 * np.pi / nrays
azimuths = ascale / 2. + np.linspace(0, 2 * np.pi, nrays, endpoint=False)
ranges = np.arange(nbins) * rscale + rscale / 2.
coordinates = np.empty((nrays, nbins, 3), dtype=float)
coordinates[:, :, 0] = np.tile(ranges, (nrays, 1))
coordinates[:, :, 1] = np.transpose(np.tile(azimuths, (nbins, 1)))
coordinates[:, :, 2] = elangle
return coordinates
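# Illustration (parameter values are made up): a 360-ray sweep with 1 km range bins
# at 0.5 degrees elevation.
import numpy as np

coords = sweep_centroids(nrays=360, rscale=1000.0, nbins=100, elangle=np.radians(0.5))
print(coords.shape)      # (360, 100, 3): rays x bins x (range, azimuth, elevation)
print(coords[0, 0, 0])   # 500.0 -> centre of the first 1 km range bin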
|
0d5d39589a6b6945618d4cd122c88a9a8f711f57
| 30,347 |
import time
def toc():
"""
    Equivalent of MATLAB's toc: print and return the seconds elapsed since the
    matching tic() call stored globals()['tt'].
    :return: elapsed time in seconds
"""
t = time.clock() - globals()['tt']
print('\nElapsed time: %.8f seconds\n' % t)
return t
|
ce7d5898972fa751178ab35a41736fd136f85d24
| 30,348 |
def read_images_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadImagesBinary(const std::string& path)
void Reconstruction::WriteImagesBinary(const std::string& path)
"""
images = {}
with open(path_to_model_file, "rb") as fid:
num_reg_images = read_next_bytes(fid, 8, "Q")[0]
for _ in range(num_reg_images):
binary_image_properties = read_next_bytes(
fid, num_bytes=64, format_char_sequence="idddddddi")
image_id = binary_image_properties[0]
qvec = np.array(binary_image_properties[1:5])
tvec = np.array(binary_image_properties[5:8])
camera_id = binary_image_properties[8]
image_name = ""
current_char = read_next_bytes(fid, 1, "c")[0]
while current_char != b"\x00": # look for the ASCII 0 entry
image_name += current_char.decode("utf-8")
current_char = read_next_bytes(fid, 1, "c")[0]
num_points2D = read_next_bytes(fid, num_bytes=8,
format_char_sequence="Q")[0]
x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
format_char_sequence="ddq"*num_points2D)
xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
tuple(map(float, x_y_id_s[1::3]))])
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images
|
e1baf9988b74a8e0108d84bca48d2cf2f10f7358
| 30,350 |
def __no_conflicts(items):
"""Return True if each possible pair, from a list of items, has no conflicts."""
return all(__no_conflict(combo[0], combo[1]) for combo in it.combinations(items, 2))
|
761641bd59162e4714ce4ab04274307353f0aefa
| 30,352 |
def valid_tetrodes(tetrode_ids, tetrode_units):
"""
Only keep valid tetrodes with neuron units so that there is corresponding spike train data.
:param tetrode_ids: (list) of tetrode ids in the order of LFP data
:param tetrode_units: (dict) number of neuron units on each tetrode
:return: (list) of tetrode ids with neuron units
"""
return [x for x in tetrode_ids if tetrode_units[x] > 0]
|
c887f5e5c29d841da63fe0cd56c41eda5ddde891
| 30,354 |
import datetime
def get_us_week(date):
"""Determine US (North American) week number"""
# Each date belongs to some week. Each week has a Saturday. The week_sat_offset is number of
# days between the Saturday and the date:
week_sat_offset = (12 - date.weekday()) % 7
week_sat = date + datetime.timedelta(days=week_sat_offset)
week_year = week_sat.year
frst_sat_offset = (12 - datetime.date(week_year, 1, 1).weekday()) % 7
frst_sat = datetime.date(week_year, 1, 1) + datetime.timedelta(days=frst_sat_offset)
return (((date - frst_sat).days - 1) // 7) + 2
|
30e7f7179d732cdf08c0dcdeff627c889af6c340
| 30,356 |
def is_street_name(elem):
"""This function takes an element and returns whether it contains an attrib key
'addr:street'.
    This is a modification of https://classroom.udacity.com/nanodegrees/nd002/parts/0021345404/modules/316820862075461/lessons/5436095827/concepts/54446302850923"""
return (elem.attrib["k"] == "addr:street") or (elem.attrib["k"] == "addr:street_1")
|
2b753fab69959200cc79895f382767af76295420
| 30,357 |
def read_youtube_urls():
"""
    Required format of the txt file containing the YouTube URLs:
url_1
url_2
.
.
.
url_n
    The file is chosen interactively via a file-open dialog.
    :return: list of valid YouTube URLs found in the file
"""
yt_urls = []
file_to_read = askopenfile(mode="r", filetypes=[("Text file", "*.txt")])
if file_to_read is not None:
while True:
curr_url = file_to_read.readline()
cleaned_curr_url = curr_url.strip().rstrip("\n").strip("\r").strip("\t")
if not curr_url:
break
if not cleaned_curr_url:
continue
if YOUTUBE_URL_REGEX.findall(cleaned_curr_url):
yt_urls.append(cleaned_curr_url)
else:
show_error_message(
f'"{cleaned_curr_url}" IS NOT A VALID YOUTUBE URL. SKIPPED.'
)
return yt_urls
|
5a8d505fe39d35c117ceaef33cc878f5ed7f5a1c
| 30,359 |
def _get_search_direction(state):
"""Computes the search direction to follow at the current state.
On the `k`-th iteration of the main L-BFGS algorithm, the state has collected
the most recent `m` correction pairs in position_deltas and gradient_deltas,
where `k = state.num_iterations` and `m = min(k, num_correction_pairs)`.
Assuming these, the code below is an implementation of the L-BFGS two-loop
recursion algorithm given by [Nocedal and Wright(2006)][1]:
```None
q_direction = objective_gradient
for i in reversed(range(m)): # First loop.
inv_rho[i] = gradient_deltas[i]^T * position_deltas[i]
alpha[i] = position_deltas[i]^T * q_direction / inv_rho[i]
q_direction = q_direction - alpha[i] * gradient_deltas[i]
kth_inv_hessian_factor = (gradient_deltas[-1]^T * position_deltas[-1] /
gradient_deltas[-1]^T * gradient_deltas[-1])
r_direction = kth_inv_hessian_factor * I * q_direction
for i in range(m): # Second loop.
beta = gradient_deltas[i]^T * r_direction / inv_rho[i]
r_direction = r_direction + position_deltas[i] * (alpha[i] - beta)
return -r_direction # Approximates - H_k * objective_gradient.
```
Args:
state: A `LBfgsOptimizerResults` tuple with the current state of the
search procedure.
Returns:
A real `Tensor` of the same shape as the `state.position`. The direction
along which to perform line search.
"""
# The number of correction pairs that have been collected so far.
num_elements = tf.minimum(
state.num_iterations,
distribution_util.prefer_static_shape(state.position_deltas)[0])
def _two_loop_algorithm():
"""L-BFGS two-loop algorithm."""
# Correction pairs are always appended to the end, so only the latest
# `num_elements` vectors have valid position/gradient deltas.
position_deltas = state.position_deltas[-num_elements:]
gradient_deltas = state.gradient_deltas[-num_elements:]
# Pre-compute all `inv_rho[i]`s.
inv_rhos = tf.reduce_sum(
input_tensor=gradient_deltas * position_deltas, axis=1)
def first_loop(acc, args):
_, q_direction = acc
position_delta, gradient_delta, inv_rho = args
alpha = tf.reduce_sum(input_tensor=position_delta * q_direction) / inv_rho
return (alpha, q_direction - alpha * gradient_delta)
# Run first loop body computing and collecting `alpha[i]`s, while also
# computing the updated `q_direction` at each step.
zero = tf.zeros_like(inv_rhos[0])
alphas, q_directions = tf.scan(
first_loop, [position_deltas, gradient_deltas, inv_rhos],
initializer=(zero, state.objective_gradient), reverse=True)
# We use `H^0_k = gamma_k * I` as an estimate for the initial inverse
# hessian for the k-th iteration; then `r_direction = H^0_k * q_direction`.
gamma_k = inv_rhos[-1] / tf.reduce_sum(input_tensor=gradient_deltas[-1] *
gradient_deltas[-1])
r_direction = gamma_k * q_directions[0]
def second_loop(r_direction, args):
alpha, position_delta, gradient_delta, inv_rho = args
beta = tf.reduce_sum(input_tensor=gradient_delta * r_direction) / inv_rho
return r_direction + (alpha - beta) * position_delta
# Finally, run second loop body computing the updated `r_direction` at each
# step.
r_directions = tf.scan(
second_loop, [alphas, position_deltas, gradient_deltas, inv_rhos],
initializer=r_direction)
return -r_directions[-1]
return prefer_static.cond(tf.equal(num_elements, 0),
(lambda: -state.objective_gradient),
_two_loop_algorithm)
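# Illustration (not part of the original module): a plain-NumPy sketch of the same
# two-loop recursion described in the docstring above, without the TF batching.
# Written as a reference version of the textbook algorithm, not a drop-in replacement.
import numpy as np

def lbfgs_two_loop(gradient, position_deltas, gradient_deltas):
    """Return -H_k @ gradient from m correction pairs (oldest first)."""
    q = gradient.copy()
    m = len(position_deltas)
    inv_rhos = [np.dot(gradient_deltas[i], position_deltas[i]) for i in range(m)]
    alphas = [0.0] * m
    for i in reversed(range(m)):            # first loop: newest to oldest
        alphas[i] = np.dot(position_deltas[i], q) / inv_rhos[i]
        q = q - alphas[i] * gradient_deltas[i]
    if m > 0:                               # initial inverse-Hessian scaling gamma_k
        gamma = inv_rhos[-1] / np.dot(gradient_deltas[-1], gradient_deltas[-1])
    else:
        gamma = 1.0
    r = gamma * q
    for i in range(m):                      # second loop: oldest to newest
        beta = np.dot(gradient_deltas[i], r) / inv_rhos[i]
        r = r + (alphas[i] - beta) * position_deltas[i]
    return -r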
|
5659dd49c9dcf67b65c3952a839df6c9b099ed76
| 30,360 |
def basevectors_sm(time, dipole=None):
"""
Computes the unit base vectors of the SM coordinate system with respect to
the standard geographic coordinate system (GEO).
Parameters
----------
time : float or ndarray, shape (...)
Time given as modified Julian date, i.e. with respect to the date 0h00
January 1, 2000 (mjd2000).
dipole : ndarray, shape (3,), optional
Dipole spherical harmonics :math:`g_1^0`, :math:`g_1^1` and
:math:`h_1^1`. Defaults to ``basicConfig['params.dipole']``.
Returns
-------
sm_1, sm_2, sm_3 : ndarray, shape (..., 3)
SM unit base vectors. The leading dimension agrees with the shape of
``time``, while the last dimension contains the unit vector
components in terms of GEO.
"""
if dipole is None:
dipole = basicConfig['params.dipole']
vec = _dipole_to_unit(dipole)
# get sun's position at specified times and convert to cartesian
theta_sun, phi_sun = sun_position(time)
x_sun, y_sun, z_sun = spherical_to_cartesian(1, theta_sun, phi_sun)
# create array in which the sun's vector resides in last dimension
s = np.empty(x_sun.shape + (3,))
s[..., 0] = x_sun
s[..., 1] = y_sun
s[..., 2] = z_sun
# set third unit base vector of SM to dipole unit vector
sm_3 = np.empty(x_sun.shape + (3,))
sm_3[..., 0] = vec[0]
sm_3[..., 1] = vec[1]
sm_3[..., 2] = vec[2]
# compute second base vector of SM using the cross product of the IGRF
# dipole unit vector and the sun direction vector
sm_2 = np.cross(sm_3, s)
norm_sm_2 = np.linalg.norm(sm_2, axis=-1, keepdims=True)
sm_2 = sm_2 / norm_sm_2
    # compute the first unit base vector using the cross product of the
    # second and third unit base vectors
sm_1 = np.cross(sm_2, sm_3)
return sm_1, sm_2, sm_3
|
434d2ad867aaefb483f8ec212943fc1af1f4949b
| 30,362 |
def rewrite_metadata(content, dic):
"""From content, which is the old text with the metadata and dic which has the new data, return new_txt which has data replaced by dic content, with relevant headers added """
#Splitting into headers and body. Technically, body is a list of paragraphs where first one is the headers
new_headers = ""
body = content.split("\n\n")
headers = body[0]
#Replacing data in headers
for line in headers.split("\n") :
has_match = False
#Replace data in preexisting line
for key in list(dic.keys()) :
if line.startswith(key) :
new_headers = new_headers + key + ": " + str(dic[key]) + "\n"
del dic[key]
has_match = True
        #Copy existing headers that are not overwritten by dic
if not has_match :
new_headers = new_headers + line + "\n"
# In case we forgot to add a line manually
for left in list(dic.keys()) :
new_headers = new_headers + left + ": " + str(dic[left]) + "\n"
#Formatting, joining new text
body[0] = new_headers
new_txt = "\n\n".join(body)
return new_txt
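# Usage sketch (input invented for illustration): headers are "Key: value"
# lines separated from the body by a blank line. Note that `dic` is mutated
# (matched keys are deleted) and leftover keys are appended as new headers.
old_text = "Title: Draft\nDate: 2020-01-01\n\nBody text."
new_text = rewrite_metadata(old_text, {"Date": "2021-06-15", "Tags": "demo"})
# new_text now starts with:
#   Title: Draft
#   Date: 2021-06-15
#   Tags: demo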
|
14f7da66f19c24d073f1fdee4b56d49d28320e71
| 30,363 |
def rotate_points(points, axis, angle, origin=None):
"""Rotates points around an arbitrary axis in 3D (radians).
Parameters:
points (sequence of sequence of float): XYZ coordinates of the points.
axis (sequence of float): The rotation axis.
angle (float): the angle of rotation in radians.
origin (sequence of float): Optional. The origin of the rotation axis.
Default is ``[0.0, 0.0, 0.0]``.
Returns:
list: the rotated points
Notes:
For more info, see [1]_.
References:
.. [1] Wikipedia. *Rotation matrix*.
Available at: https://en.wikipedia.org/wiki/Rotation_matrix.
"""
# rotation matrix
R = rotation_matrix(angle, axis, origin)
# apply rotation
points = transform(points, R)
return points
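# Hedged usage sketch (rotation_matrix/transform are the module helpers this
# function relies on, so the call is left commented out): rotating the X unit
# vector 90 degrees about the Z axis should give the Y unit vector.
# from math import pi
# rotate_points([[1.0, 0.0, 0.0]], [0.0, 0.0, 1.0], pi / 2)  # ~[[0.0, 1.0, 0.0]]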
|
a2eb1857dac96d46f7319e638423164ae6951ebe
| 30,364 |
def resolve_translation(instance, info, language_code):
"""Get translation object from instance based on language code."""
loader = TYPE_TO_TRANSLATION_LOADER_MAP.get(type(instance))
if loader:
return loader(info.context).load((instance.pk, language_code))
raise TypeError(f"No dataloader found to {type(instance)}")
|
50ada7fd7d681a5ca8def13a5f07c9fe73f4461a
| 30,365 |
def reverse_dict_old(dikt):
"""
    takes a dict and returns a new dict with the old values as keys and the old keys as values (collected in a list)
    example
    reverse_dict_old({'AB04a':'b', 'AB04b': 'b', 'AB04c':'b', 'CC04x': 'c'})
    will return
    {'b': ['AB04a', 'AB04b', 'AB04c'], 'c': ['CC04x']}
"""
new_dikt = {}
for k, v in dikt.items():
if v in new_dikt:
new_dikt[v].append(k)
else:
new_dikt[v] = [k]
return new_dikt
|
50155858fbbe52dc8daae66e6a94c8885b80ba05
| 30,366 |
def get_active_user(request):
"""
Endpoint for getting the active user
through the authtoken
"""
return Response(UserSerializer(request.user, context={'is_public_view': False}).data, status=status.HTTP_200_OK)
|
b86214eee8c34c53ed66992420f13f64cc2bda30
| 30,367 |
def upsert_website(admin_id, root, data, force_insert=False):
"""Method to update and insert new website to live streaming.
Args:
admin_id (str): Admin privileges flag.
root (str): Root privileges activation flag.
data (dict): website data structure.
force_insert (bool): Force insert Flag for updating the website information.
"""
try:
if not is_admin(admin_id):
root = False
else:
root = root in ["true", "True"]
if root:
result = seer.online_streamer.add_website(data["name"], data["url"], data["server"], force_insert=force_insert)
else:
result = False
except Exception:
result = False
return result
|
bc118cf7c42a375cc458b92713d4e4f802239d3c
| 30,369 |
def rest_query_object_by_id(bc_app, url, obj_id, json_obj_name, object_type, para_query_mode=False):
"""
query object by id
:param bc_app: used to attach app sign
    :param url: must NOT contain params at the end
:param obj_id: object id
:param json_obj_name: like 'plan' for plan query
:param object_type: object type like beecloud.entity.BCPlan
:param para_query_mode: true if query string is para={}, else k1=v1&k2=v2
:return: beecloud.entity.BCResult
"""
query_param = _TmpObject()
attach_app_sign(query_param, BCReqType.QUERY, bc_app)
if para_query_mode:
url = url + '/' + obj_id + '?para=' + obj_to_quote_str(query_param)
tmp_resp = http_get(url, bc_app.timeout)
else:
tmp_resp = http_get(url + '/' + obj_id, bc_app.timeout, obj_to_dict(query_param))
# if err encountered, [0] equals 0
if not tmp_resp[0]:
return tmp_resp[1]
# [1] contains result dict
resp_dict = tmp_resp[1]
bc_result = BCResult()
set_common_attr(resp_dict, bc_result)
if not bc_result.result_code:
setattr(bc_result, json_obj_name, parse_dict_to_obj(resp_dict.get(json_obj_name), object_type))
return bc_result
|
2ed5390fba651c5874cfc51e472629ba9ad4369b
| 30,370 |
import re
import unicodedata
def bert_clean_text(text):
"""Performs invalid character removal and whitespace cleanup on text."""
text = re.sub('[_—.]{4,}', '__', text)
text = unicodedata.normalize("NFKC", text)
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xFFFD or _is_control(char):
continue
# elif _is_whitespace(char):
# output.append(" ")
else:
output.append(char)
return "".join(output)
|
e31930f7eb04cfc24f5dc2dd031de40d58643027
| 30,371 |
import random
def get_successors(curr_seq):
""" Function to generate a list of 100 random successor sequences
by swapping any cities. Please note that the first and last city
should remain unchanged since the traveller starts and ends in
the same city.
Parameters
----------
curr_seq : [list]
[list of cities]
Returns
-------
[list of list]
[list of list of random cities]
"""
successor_list = list([])
A = 0 #index variable for swapping
B = 0 #index variable for swapping
copy_list = list([])
for i in range(100):
copy_list = curr_seq[:]
A = random.randint(1, len(curr_seq) - 2)
B = random.randint(1, len(curr_seq) - 2)
sequence = swap(copy_list, A, B)
successor_list.append(sequence)
return successor_list
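# Hedged sketch of the `swap` helper the function assumes (the project's own
# implementation may differ), plus a usage example with invented city labels.
def swap_sketch(seq, i, j):
    seq[i], seq[j] = seq[j], seq[i]
    return seq

# tour = ['A', 'B', 'C', 'D', 'A']     # starts and ends in the same city
# neighbours = get_successors(tour)    # 100 tours; endpoints stay untouched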
|
db928f0baed2c46211f9633c2e2223e39c177dbe
| 30,373 |
def Q(lambda_0, lambda_, eps_c, Delta, norm_zeta2, nu):
"""
Quadratic upper bound of the duality gap function initialized at lambda_0
"""
lmd = lambda_ / lambda_0
Q_lambda = (lmd * eps_c + Delta * (1. - lmd) +
0.5 * nu * norm_zeta2 * (1. - lmd) ** 2)
return Q_lambda
|
e7c624d822713efd9a63e92d40ecb9c13d5ee8d6
| 30,374 |
def solve(equation):
"""
Solves equation using shunting-yard algorithm
:param equation: string
equation to be solved
:return: float
result of equation
"""
postfix = rpn(equation)
result = shunting_yard(postfix)
return result
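# Hedged sketch of the two helpers the pipeline above assumes (not the
# project's actual rpn/shunting_yard implementations): rpn_sketch converts an
# infix string to postfix tokens via the shunting-yard algorithm and
# eval_postfix_sketch evaluates them. Binary + - * / and parentheses only.
import operator
import re

_OPS = {'+': (1, operator.add), '-': (1, operator.sub),
        '*': (2, operator.mul), '/': (2, operator.truediv)}

def rpn_sketch(equation):
    output, stack = [], []
    for token in re.findall(r'\d+\.?\d*|[-+*/()]', equation):
        if token in _OPS:
            # pop operators of greater or equal precedence (left-associative)
            while stack and stack[-1] in _OPS and _OPS[stack[-1]][0] >= _OPS[token][0]:
                output.append(stack.pop())
            stack.append(token)
        elif token == '(':
            stack.append(token)
        elif token == ')':
            while stack[-1] != '(':
                output.append(stack.pop())
            stack.pop()  # discard the '('
        else:
            output.append(token)  # number
    return output + stack[::-1]

def eval_postfix_sketch(tokens):
    stack = []
    for token in tokens:
        if token in _OPS:
            b, a = stack.pop(), stack.pop()
            stack.append(_OPS[token][1](a, b))
        else:
            stack.append(float(token))
    return stack[0]

# eval_postfix_sketch(rpn_sketch("3 + 4 * (2 - 1)"))  # -> 7.0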
|
c57dc10b4c41f048a5690548a7155550b52d8d1c
| 30,375 |
def get_project_url(): # pragma no cover
"""Open .git/config file and git the url from it."""
project_info = {}
try:
with open('./.git/config', 'r') as git_config:
for line in git_config:
if "url = git@" in line:
dont_need, need = line.split(' = ')
if "url = git@" in line:
dont_need, need = line.split('@')
url = need.strip()
url = url.replace(':', '/')
url = url.replace('.git', '')
url = url.replace(url, 'https://' + url)
project_info['url'] = url
break
elif "url = https://github.com" in line:
dont_need, need = line.split(' = ')
url = need.strip()
url = url.replace(".git", '')
project_info['url'] = url
break
except FileNotFoundError:
project_info['url'] = "YOUR PROJECT URL HERE"
project_info['project_user'] = "YOUR NAME HERE"
project_info['project_name'] = "YOUR PROJECT NAME HERE"
project_info['project_user_profile_url'] = "YOUR USER PROFILE URL HERE"
return project_info
project_user = get_user_name(url)
project_info['project_user'] = project_user
project_name = get_project_name(url)
project_info['project_name'] = project_name
project_user_profile_url = get_user_profile_url(project_user)
project_info['project_user_profile_url'] = project_user_profile_url
return project_info
|
d296a23372c22adfd35e4c0ea463db2fbac557b1
| 30,376 |
from datetime import datetime
def sign_out(entry, time_out=None, forgot=False):
"""Sign out of an existing entry in the timesheet. If the user
forgot to sign out, flag the entry.
:param entry: `models.Entry` object. The entry to sign out.
:param time_out: (optional) `datetime.time` object. Specify the sign out time.
:param forgot: (optional) If true, user forgot to sign out. Entry will be flagged as forgotten.
:return: The signed out entry.
""" # noqa
if time_out is None:
time_out = datetime.today().time()
if forgot:
entry.forgot_sign_out = True
logger.info(
'{} forgot to sign out on {}.'.format(entry.user_id, entry.date)
)
else:
entry.time_out = time_out
logger.info('{} ({}) signed out.'.format(entry.user_id, entry.user_type))
return entry
|
c94ce2231dda115a53ea41a12dd04cbcd728088f
| 30,377 |
def get_button_write(deck_id: str, page: int, button: int) -> str:
"""Returns the text to be produced when the specified button is pressed"""
return _button_state(deck_id, page, button).get("write", "")
|
34cec488aa5245a620953319ce5dab8a0b7032e0
| 30,378 |
def opensafety_a(data: bytes) -> int:
"""
Compute a CRC-16 checksum of data with the opensafety_a algorithm.
:param bytes data: The data to be computed
:return: The checksum
:rtype: int
:raises TypeError: if the data is not a bytes-like object
"""
_ensure_bytes(data)
return _crc_16_opensafety_a(data)
|
be2a432874c50e7edd6af0555ed4cd2a7fb4c4b2
| 30,379 |
import numpy as np
def EM_frac(pdf, iters=30, EPS=1E-12, verbose=True):
""" EM-algorithm for unknown integrated class fractions
Args:
pdf : (n x K) density (pdf) values for n measurements, K classes
        iters : Number of iterations
Returns:
frac : Integrated class fractions
"""
n = pdf.shape[0]
K = pdf.shape[1]
P = np.zeros((n,K))
frac = np.ones(K) / K
for k in range(iters):
# Loop over observations
for i in range(n):
# E-step, obtain normalized probabilities
P[i,:] = pdf[i,:] * frac[:]
P[i,:] /= (np.sum(P[i,:]) + EPS)
# M-step, update fractions by averaging over observations
frac = np.sum(P,axis=0) / n
if verbose:
print(f'EM_frac: iter {k:4}, NLL = {mixture_nll(pdf,frac):.3f}, frac = {frac}')
return frac
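# Hedged usage sketch with synthetic data: two unit-variance Gaussian classes
# with true fractions 0.7 / 0.3; the density helper below is only for
# illustration and is not part of the original module.
import numpy as np

def _gauss_pdf(x, mu, sigma):
    return np.exp(-0.5 * ((x - mu) / sigma) ** 2) / (sigma * np.sqrt(2.0 * np.pi))

rng = np.random.default_rng(0)
x = np.concatenate([rng.normal(0.0, 1.0, 700), rng.normal(4.0, 1.0, 300)])
pdf = np.stack([_gauss_pdf(x, 0.0, 1.0), _gauss_pdf(x, 4.0, 1.0)], axis=1)
# frac = EM_frac(pdf, iters=30, verbose=False)   # converges near [0.7, 0.3]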
|
7944e75b955b27cc0c7479a5eb7b3e6a6d656ede
| 30,380 |
def gaussian_loss(y_true, y_pred, interval, eta):
""" non zero mean absolute loss for one batch
This function parameterizes a loss of the form
Loss = - exp(- x ^ 2 / 2*sigma ^ 2)
where x = y_true - y_pred and
sigma = eta * y_true
and eta is a constant, generally much less than 1
Args:
y_true: true depth
y_pred: predicted depth
interval: depth interval used
        eta: multiplicative constant appearing in the standard deviation of the gaussian loss
"""
with tf.name_scope('MAE'):
shape = tf.shape(y_pred)
interval = tf.reshape(interval, [shape[0]])
# mask_true is a tensor of 0's and 1's, where 1 is for valid pixels and 0 for invalid pixels
mask_true = tf.cast(tf.not_equal(y_true, 0.0), dtype='float32')
# The number of valid pixels in the depth map -- used for taking the average over only valid pixels
num_valid_pixels = tf.abs(tf.reduce_sum(
mask_true, axis=[1, 2, 3])) + 1e-6
# The standard deviation used in the gaussian is of the form eta * y_true
# with a small offset to prevent division by zero on invalid pixels
sigma = eta * y_true + 1e-6
# Below we assume the random error in y_true used to regularize the divergence
# increases linearly with distance
error = y_true - y_pred
error = error*mask_true
x = - tf.math.pow(error / sigma, 2.0) / 2.0
loss = - tf.math.exp(x)
# Average over the number of valid pixels
loss = tf.reduce_sum(loss) / num_valid_pixels
return loss, tf.no_op()
|
a39e4caa12304f43512f843034c143711797e5f8
| 30,382 |
import copy
def create_registration_data(legal_type, identifier='FM1234567', tax_id=None):
"""Test data for registration."""
person_json = {
'officer': {
'id': 2,
'firstName': 'Peter',
'lastName': 'Griffin',
'middleName': '',
'partyType': 'person'
},
'mailingAddress': {
'streetAddress': 'mailing_address - address line one',
'streetAddressAdditional': '',
'addressCity': 'mailing_address city',
'addressCountry': 'CA',
'postalCode': 'H0H0H0',
'addressRegion': 'BC'
}
}
org_json = copy.deepcopy(person_json)
org_json['officer'] = {
'id': 2,
'organizationName': 'Xyz Inc.',
'identifier': 'BC1234567',
'taxId': '123456789',
'email': '[email protected]',
'partyType': 'organization'
}
business = create_business(identifier,
legal_type=legal_type,
legal_name='test-reg-' + legal_type)
if tax_id:
business.tax_id = tax_id
json_filing = {
'filing': {
'header': {
'name': 'registration'
},
'registration': {
}
}
}
filing = create_filing(json_filing=json_filing)
party = create_party(person_json if legal_type == 'SP' else org_json)
role = 'proprietor' if legal_type == 'SP' else 'partner'
create_party_role(business, party, [role])
business.save()
filing.business_id = business.id
filing.save()
return filing.id, business.id
|
d0be4516f8f67a5aaa05365ab47c0258b1e174d1
| 30,385 |
def proctored_exam_results_csv(entry_id, xmodule_instance_args):
"""
Compute proctored exam results report for a course and upload the
CSV for download.
"""
action_name = 'generating_proctored_exam_results_report'
task_fn = partial(upload_proctored_exam_results_report, xmodule_instance_args)
return run_main_task(entry_id, task_fn, action_name)
|
e49927963c17c0c7582f4614dbb570760c84fd34
| 30,386 |
def get_twiter_position(twit, market):
"""
    Gets Vicki's position on the appropriate stock
:param twit: Twitter API Object
:param market: The market pair which to observe
:type twit: twitter
:type market: str
    :return: String containing Vicki's position on the relevant market
:rtype : str
"""
statuses = fetch_timeline(twit)
try:
latest_tweet = ""
for tweet in statuses:
if market in tweet.text:
latest_tweet = tweet.text
break
if 'long' in latest_tweet:
return 'long'
elif 'short' in latest_tweet:
return 'short'
    #Generic exception handler used to catch all possible error types when iterating through tweets
    except Exception as e:
        error_handler('Error iterating through tweets in statuses', e)
|
fb5cf927de81ae39ba913da27e61916315664f4c
| 30,387 |
def build_new_devices_list(module):
"""
Build List of new devices to register in CV.
Structure output:
>>> configlets_get_from_facts(cvp_device)
{
[
{
"name": "veos01",
"configlets": [
"cv_device_test01",
"SYS_TelemetryBuilderV2_172.23.0.2_1",
"veos01-basic-configuration",
"SYS_TelemetryBuilderV2"
],
"cv_configlets": [],
"parentContainerName": "DC1_VEOS",
"imageBundle": []
}
]
}
Parameters
----------
module : AnsibleModule
Ansible module.
Returns
-------
list
List of new devices to provision on CV.
"""
# Get variable from module
devices_filter = module.params["device_filter"]
devices_ansible = module.params["devices"]
device_info = dict()
devices_info = list()
# facts_devices = facts_devices(module)
# Loop in Input devices to see if it is part of CV Facts
for ansible_device_hostname, ansible_device in devices_ansible.items():
if is_in_filter(
hostname_filter=devices_filter, hostname=ansible_device_hostname
):
cvp_device = device_get_from_facts(
module=module, device_name=ansible_device_hostname
)
if len(cvp_device) >= 0:
if is_in_container(device=cvp_device, container="undefined_container"):
device_info = {
"name": ansible_device_hostname,
"parentContainerName": ansible_device["parentContainerName"],
"configlets": ansible_device["configlets"],
"cv_configlets": [],
"imageBundle": ansible_device["imageBundle"],
"message": "Device will be provisionned",
}
devices_info.append(device_info)
return devices_info
|
915ca10ee20c4da5bf1ff4f504ecba1b0f217411
| 30,388 |
from typing import Tuple
def preprocess(input_path: str, image_size: int) -> Tuple[pd.Series, np.ndarray]:
"""
Preprocss imager data into a depth, image tuple.
Image is resized to a given width.
Additionally, to avoid floating point difficulties, depth measurements are converted
to centimeters and integer type.
Parameters
----------
input_path : str
path to an imager file in a RawImagerData format
image_size : int
        resulting image width
Returns
-------
Tuple[pd.Series, np.ndarray]
depth measurements, image
"""
data = pd.read_csv(input_path, **RawImagerData.STORAGE_FORMAT['read_params'])
data = data.dropna()
data = RawImagerData.convert(data)
img = Image.fromarray(data.df.iloc[:, 1:].values)
resized_img = img.resize((image_size, img.size[1]))
result_img = np.asarray(resized_img)
depth = (data.df.depth * 10**RawImagerData.DEPTH_PRECISION_DIGITS).astype(np.int64)
return depth, result_img
|
7116555c07fdbb7277d1c9da84b51e83b399dd74
| 30,389 |
def _scale_pot(pot, scale_coeff, numtors):
""" Scale the potential
"""
print('scale_coeff test 0:', scale_coeff, numtors)
scale_factor = scale_coeff**(2.0/numtors)
print('scale_coeff test:', scale_coeff, numtors, scale_factor)
new_pot = {}
for idx, val in pot.items():
new_pot[idx] = pot[idx] * scale_factor
return new_pot
|
0e634b7766a5822d3b2e80fffa0b56dccee125ab
| 30,391 |
import pkg_resources
def get_substation_file():
"""Return the default substation file for the CONUS."""
return pkg_resources.resource_filename('cerf', 'data/hifld_substations_conus_albers.zip')
|
7628c7981dd9f82b4210a451ad62fffa72222fe8
| 30,392 |
def configure_assignment_caller(context, pyramid_request, parsed_params=None):
"""
Call BasicLTILaunchViews.configure_assignment().
Set up the appropriate conditions and then call
BasicLTILaunchViews.configure_assignment(), and return whatever
BasicLTILaunchViews.configure_assignment() returns.
"""
# The document_url, resource_link_id and tool_consumer_instance_guid parsed
# params are always present when configure_assignment() is called.
# ConfigureAssignmentSchema ensures this.
pyramid_request.parsed_params = {
"document_url": "TEST_DOCUMENT_URL",
"resource_link_id": "TEST_RESOURCE_LINK_ID",
"tool_consumer_instance_guid": "TEST_TOOL_CONSUMER_INSTANCE_GUID",
}
if parsed_params:
pyramid_request.parsed_params.update(parsed_params)
views = BasicLTILaunchViews(context, pyramid_request)
return views.configure_assignment()
|
dc607bf0e82a2956a1e435bfd480442cd9b6b920
| 30,393 |
import types
import pandas
import numpy
def hpat_pandas_series_isna(self):
"""
Pandas Series method :meth:`pandas.Series.isna` and :meth:`pandas.Series.isnull` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_isna1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_str_isna1
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_isnull1
Parameters
-----------
self : :obj:`pandas.Series` object
input argument
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method isna/isnull().'
if not isinstance(self, SeriesType):
raise TypingError(
'{} The object must be a pandas.series. Given self: {}'.format(_func_name, self))
if isinstance(self.data.dtype, (types.Integer, types.Float)):
def hpat_pandas_series_isna_impl(self):
return pandas.Series(data=numpy.isnan(self._data), index=self._index, name=self._name)
return hpat_pandas_series_isna_impl
if isinstance(self.data.dtype, types.UnicodeType):
def hpat_pandas_series_isna_impl(self):
result = numpy.empty(len(self._data), numpy.bool_)
byte_size = 8
            # iterate over bits in the StringArrayType null_bitmap and fill the array indicating whether each element is NaN
for i in range(len(self._data)):
bmap_idx = i // byte_size
bit_idx = i % byte_size
bmap = self._data.null_bitmap[bmap_idx]
bit_value = (bmap >> bit_idx) & 1
result[i] = bit_value == 0
return pandas.Series(result, index=self._index, name=self._name)
return hpat_pandas_series_isna_impl
|
5a541da044e83e8248446c8b2a0d883213bddd17
| 30,394 |
from typing import Optional
def _filter_stmts(base_node: nodes.NodeNG, stmts, frame, offset):
"""Filter the given list of statements to remove ignorable statements.
If base_node is not a frame itself and the name is found in the inner
frame locals, statements will be filtered to remove ignorable
statements according to base_node's location.
:param stmts: The statements to filter.
:type stmts: list(nodes.NodeNG)
:param frame: The frame that all of the given statements belong to.
:type frame: nodes.NodeNG
:param offset: The line offset to filter statements up to.
:type offset: int
:returns: The filtered statements.
:rtype: list(nodes.NodeNG)
"""
# if offset == -1, my actual frame is not the inner frame but its parent
#
# class A(B): pass
#
# we need this to resolve B correctly
if offset == -1:
myframe = base_node.frame().parent.frame()
else:
myframe = base_node.frame()
# If the frame of this node is the same as the statement
# of this node, then the node is part of a class or
# a function definition and the frame of this node should be the
    # upper frame, not the frame of the definition.
# For more information why this is important,
# see Pylint issue #295.
# For example, for 'b', the statement is the same
# as the frame / scope:
#
# def test(b=1):
# ...
if (
base_node.parent
and base_node.statement(future=True) is myframe
and myframe.parent
):
myframe = myframe.parent.frame()
mystmt: Optional[nodes.Statement] = None
if base_node.parent:
mystmt = base_node.statement(future=True)
# line filtering if we are in the same frame
#
# take care node may be missing lineno information (this is the case for
# nodes inserted for living objects)
if myframe is frame and mystmt and mystmt.fromlineno is not None:
assert mystmt.fromlineno is not None, mystmt
mylineno = mystmt.fromlineno + offset
else:
# disabling lineno filtering
mylineno = 0
_stmts = []
_stmt_parents = []
statements = _get_filtered_node_statements(base_node, stmts)
for node, stmt in statements:
# line filtering is on and we have reached our location, break
if stmt.fromlineno and stmt.fromlineno > mylineno > 0:
break
# Ignore decorators with the same name as the
# decorated function
# Fixes issue #375
if mystmt is stmt and _is_from_decorator(base_node):
continue
if node.has_base(base_node):
break
if isinstance(node, nodes.EmptyNode):
# EmptyNode does not have assign_type(), so just add it and move on
_stmts.append(node)
continue
assign_type = node.assign_type()
_stmts, done = assign_type._get_filtered_stmts(base_node, node, _stmts, mystmt)
if done:
break
optional_assign = assign_type.optional_assign
if optional_assign and assign_type.parent_of(base_node):
# we are inside a loop, loop var assignment is hiding previous
# assignment
_stmts = [node]
_stmt_parents = [stmt.parent]
continue
if isinstance(assign_type, nodes.NamedExpr):
# If the NamedExpr is in an if statement we do some basic control flow inference
if_parent = _get_if_statement_ancestor(assign_type)
if if_parent:
# If the if statement is within another if statement we append the node
# to possible statements
if _get_if_statement_ancestor(if_parent):
optional_assign = False
_stmts.append(node)
_stmt_parents.append(stmt.parent)
# If the if statement is first-level and not within an orelse block
# we know that it will be evaluated
elif not if_parent.is_orelse:
_stmts = [node]
_stmt_parents = [stmt.parent]
# Else we do not known enough about the control flow to be 100% certain
# and we append to possible statements
else:
_stmts.append(node)
_stmt_parents.append(stmt.parent)
else:
_stmts = [node]
_stmt_parents = [stmt.parent]
# XXX comment various branches below!!!
try:
pindex = _stmt_parents.index(stmt.parent)
except ValueError:
pass
else:
# we got a parent index, this means the currently visited node
# is at the same block level as a previously visited node
if _stmts[pindex].assign_type().parent_of(assign_type):
# both statements are not at the same block level
continue
# if currently visited node is following previously considered
# assignment and both are not exclusive, we can drop the
# previous one. For instance in the following code ::
#
# if a:
# x = 1
# else:
# x = 2
# print x
#
            # we can remove neither x = 1 nor x = 2 when looking for 'x'
# of 'print x'; while in the following ::
#
# x = 1
# x = 2
# print x
#
# we can remove x = 1 when we see x = 2
#
# moreover, on loop assignment types, assignment won't
# necessarily be done if the loop has no iteration, so we don't
# want to clear previous assignments if any (hence the test on
# optional_assign)
if not (optional_assign or nodes.are_exclusive(_stmts[pindex], node)):
del _stmt_parents[pindex]
del _stmts[pindex]
# If base_node and node are exclusive, then we can ignore node
if nodes.are_exclusive(base_node, node):
continue
# An AssignName node overrides previous assignments if:
# 1. node's statement always assigns
# 2. node and base_node are in the same block (i.e., has the same parent as base_node)
if isinstance(node, (nodes.NamedExpr, nodes.AssignName)):
if isinstance(stmt, nodes.ExceptHandler):
# If node's statement is an ExceptHandler, then it is the variable
# bound to the caught exception. If base_node is not contained within
# the exception handler block, node should override previous assignments;
# otherwise, node should be ignored, as an exception variable
# is local to the handler block.
if stmt.parent_of(base_node):
_stmts = []
_stmt_parents = []
else:
continue
elif not optional_assign and mystmt and stmt.parent is mystmt.parent:
_stmts = []
_stmt_parents = []
elif isinstance(node, nodes.DelName):
# Remove all previously stored assignments
_stmts = []
_stmt_parents = []
continue
# Add the new assignment
_stmts.append(node)
if isinstance(node, nodes.Arguments) or isinstance(
node.parent, nodes.Arguments
):
# Special case for _stmt_parents when node is a function parameter;
# in this case, stmt is the enclosing FunctionDef, which is what we
# want to add to _stmt_parents, not stmt.parent. This case occurs when
# node is an Arguments node (representing varargs or kwargs parameter),
# and when node.parent is an Arguments node (other parameters).
# See issue #180.
_stmt_parents.append(stmt)
else:
_stmt_parents.append(stmt.parent)
return _stmts
|
744710684fd6f8b3e90e01d93e6533d1fa87c117
| 30,395 |
def lcp_coordinate_conversion(start_coords,end_coords,crs,transform):
"""
Simple Example:
network = lcp.create_raster_network(array)
Parameters:
- 'start_coords' is a list of tuples (lon,lat)
- 'end_coords' is a list of lists of tuples. Each list of end points corresponds to
a start point, so len(start_coords) must equal len(end_coords), although each
list OF end points can be of any length one or greater.
- 'crs' is a Coordinate Reference System of the type returned by rasterio (or neilpy).
- 'transform' is an Affine transformation matrix as returned by rasterio (or neilpy).
Output:
- 'converted_start_coords' is a list of tuples of PIXEL coordinates.
    - 'converted_end_coords' is a list of lists of tuples of pixel coordinates.
"""
converted_start_coords = []
converted_end_coords = []
for i,this_start_coord in enumerate(start_coords):
these_end_coords = end_coords[i]
# Convert from lat/lon to map coordinates
this_start_coord = coord_transform(*this_start_coord,4326,crs)
these_end_coords = [coord_transform(*item,4326,crs) for item in these_end_coords]
# Convert from map coordinates to pixel coordinates
this_start_coord = (~transform*this_start_coord)[::-1]
these_end_coords = [(~transform*item)[::-1] for item in these_end_coords]
# Round them to ints
this_start_coord = tuple(np.round(this_start_coord).astype(np.uint32))
these_end_coords = [tuple(item) for item in np.round(these_end_coords).astype(np.uint32)]
converted_start_coords.append(this_start_coord)
converted_end_coords.append(these_end_coords)
return converted_start_coords, converted_end_coords
|
936a1e4df8147786923dea6e87d487ea61af4408
| 30,396 |
def create_large_map(sharing_model):
"""
Create larger map with 7 BS that are arranged in a typical hexagonal structure.
:returns: Tuple(map, bs_list)
"""
map = Map(width=230, height=260)
bs_list = [
# center
Basestation('A', Point(115, 130), get_sharing_for_bs(sharing_model, 0)),
# top left, counter-clockwise
Basestation('B', Point(30, 80), get_sharing_for_bs(sharing_model, 1)),
Basestation('C', Point(115, 30), get_sharing_for_bs(sharing_model, 2)),
Basestation('D', Point(200, 80), get_sharing_for_bs(sharing_model, 3)),
Basestation('E', Point(200, 180), get_sharing_for_bs(sharing_model, 4)),
Basestation('F', Point(115, 230), get_sharing_for_bs(sharing_model, 5)),
Basestation('G', Point(30, 180), get_sharing_for_bs(sharing_model, 6)),
]
return map, bs_list
|
4348bc97177ec18dcaaf7f72f5d439a17a100956
| 30,397 |
import torch as th
def np_scatter_add(input,axis,index,src):
""" numpy wrapper for scatter_add """
th_input = th.as_tensor(input,device="cpu")
th_index = th.as_tensor(index,device="cpu")
th_src = th.as_tensor(src,device="cpu")
dim = axis
th_output = th.scatter_add(th_input,dim,th_index,th_src)
output = th_output.numpy()
return output
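# Usage sketch mirroring torch.scatter_add semantics along axis 0: values in
# `src` are accumulated into the positions of `base` selected by `idx`.
import numpy as np
base = np.zeros(3, dtype=np.float32)
idx = np.array([0, 2, 0], dtype=np.int64)
src = np.array([1.0, 2.0, 3.0], dtype=np.float32)
# np_scatter_add(base, 0, idx, src)  ->  array([4., 0., 2.], dtype=float32)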
|
4575d0d65ae93e403511b4ba6e5b920616c8bb37
| 30,398 |
def netmiko_connect(device_name, device):
"""
Successful connection returns: (True, connect_obj)
Failed authentication returns: (False, None)
"""
hostname = device["host"]
port = device.get("port", 22)
msg = ""
try:
net_connect = ConnectHandler(**device)
msg = f"Netmiko connection succesful to {hostname}:{port}"
logger.info(msg)
return (True, net_connect)
except NetmikoAuthenticationException:
msg = f"Authentication failure to: {device_name} {hostname}:{port}"
except NetmikoTimeoutException as e:
if "DNS failure" in str(e):
msg = (
f"Device {device_name} failed due to a DNS failure, hostname {hostname}"
)
elif "TCP connection to device failed" in str(e):
msg = f"Netmiko was unable to reach the provided host and port: {hostname}:{port}"
logger.error(msg)
return (False, None)
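# Hedged usage sketch: a typical Netmiko device dict (all values below are
# placeholders, not real hosts or credentials).
example_device = {
    "device_type": "cisco_ios",
    "host": "192.0.2.10",
    "username": "admin",
    "password": "secret",
    "port": 22,
}
# ok, conn = netmiko_connect("edge-router-1", example_device)
# if ok:
#     print(conn.send_command("show version"))
#     conn.disconnect()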
|
f273149ccde031512cd159ca61296ef09bc11d2d
| 30,399 |
def get_tracer(request):
"""
Utility function to retrieve the tracer from the given ``request``.
It is meant to be used only for testing purposes.
"""
return request['__datadog_request_span']._tracer
|
facd1ff0922dcc7743814cfd738d022316ba5d6d
| 30,400 |
def local_self_attention_layer(hparams, prefix):
"""Create self-attention layer based on hyperparameters."""
return transformer_layers.LocalSelfAttention(
num_heads=hparams.get(prefix + "num_heads"),
num_memory_heads=hparams.get(prefix + "num_memory_heads"),
radius=hparams.local_attention_radius,
key_value_size=hparams.d_kv,
shared_kv=hparams.get(prefix + "shared_kv", False),
attention_kwargs=attention_kwargs_from_hparams(hparams))
|
0b49b116cacdec203f531dfb28b389748a84b9b6
| 30,402 |
import pickle
import pandas as pd
from sklearn.linear_model import LinearRegression
def hwtrain(X_csv: str, y_csv: str, model: str = 'lm') -> str:
""" Read the feature matrix and label vector from training data and fit a
machine learning model. The model is saved in pickle format.
Parameters
----------
X_csv
The path to the feature matrix in CSV format
y_csv
The path to the label vector in CSV format
model
The type of machine learning model
Returns
-------
pickled_model_path
Path to the pickled model
"""
# Read the X and y CSV files
X_train_df = pd.read_csv(X_csv)
y_train_df = pd.read_csv(y_csv)
# Fit the model
if model == 'lm':
estimator = LinearRegression()
else:
raise ValueError('The only available model is "lm"')
estimator.fit(X_train_df, y_train_df)
# Save the model
pickled_model_path = model + '_model.pkl'
with open(pickled_model_path, 'wb') as model_file:
pickle.dump(estimator, model_file)
return pickled_model_path
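# Hedged usage sketch: write a tiny training set to CSV and fit the default
# linear model; the file names here are arbitrary.
import pandas as pd
pd.DataFrame({"x1": [0.0, 1.0, 2.0, 3.0]}).to_csv("X_train.csv", index=False)
pd.DataFrame({"y": [1.0, 3.0, 5.0, 7.0]}).to_csv("y_train.csv", index=False)
# hwtrain("X_train.csv", "y_train.csv", model="lm")  # -> 'lm_model.pkl'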
|
fde68a010249859a95264552bfe767f9c4636d7c
| 30,403 |
def convert_ban_to_quan(str_ban):
"""半角转全角"""
str_quan = ""
for uchar in str_ban:
inside_code = ord(uchar)
if inside_code == 32: #半角空格直接转化
inside_code = 12288
elif inside_code >= 32 and inside_code <= 126: #半角字符(除空格)根据关系转化
inside_code += 65248
#
str_quan += chr(inside_code)
return str_quan
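# Usage sketch: ASCII characters map to their full-width equivalents and the
# ASCII space becomes the ideographic space (U+3000).
# convert_ban_to_quan("Abc 123!")  # -> 'Abc\u3000123!'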
|
b8e7417c0680dcf113377d32dbb2c43738c0af6c
| 30,404 |
import requests
def update_user_permissions(userid, profile="grafana", **kwargs):
"""
    Update a user's permissions.
userid
Id of the user.
isGrafanaAdmin
Whether user is a Grafana admin.
profile
Configuration profile used to connect to the Grafana instance.
Default is 'grafana'.
CLI Example:
.. code-block:: bash
salt '*' grafana4.update_user_permissions <user_id> isGrafanaAdmin=<true|false>
"""
if isinstance(profile, string_types):
profile = __salt__["config.option"](profile)
response = requests.put(
"{0}/api/admin/users/{1}/permissions".format(profile["grafana_url"], userid),
json=kwargs,
auth=_get_auth(profile),
headers=_get_headers(profile),
timeout=profile.get("grafana_timeout", 3),
)
if response.status_code >= 400:
response.raise_for_status()
return response.json()
|
4f5b2ed94896dcb0f7d76b066ed7767700ada682
| 30,405 |
def to_xepsilon(_):
"""
    :param _: ignored
    :return: the result of calling ``xepsilon()``
    """
return xepsilon()
|
69e4e478f3e4d7978cb2a5f76aaf12053458c64a
| 30,406 |
def build_permissions_response():
""" Build a response containing only speech """
output = "I'm sorry, I was not able to lookup your home town. "\
"With your permission, I can provide you with this information. "\
"Please check your companion app for details"
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'AskForPermissionsConsent',
'permissions': [
'read::alexa:device:all:address'
]
},
'shouldEndSession': True
}
|
4b01a0fa32958127f7c373b0cedf5d518074e29e
| 30,407 |
def simple_tokenizer(text):
"""
Example for returning a list of terms from text.
1. Normalizes text by casting to lowercase.
2. Removes punctuation from tokens.
3. Returns tokens as indicated by whitespace.
"""
text = text.lower()
# Remove punctuation from file
if isinstance(text, str):
# Python 2+3 unicode strings
text = text.translate(REMOVE_TABLE)
elif isinstance(text, basestring):
# Python 2 old-style strings
text = text.translate(None, REMOVE.encode('utf-8'))
# Split the tokens
return text.split()
|
5b26c2dfdb5cc794fd5238e5a2ecdacb18ae1e19
| 30,408 |
def progress(*args, **kwargs):
"""
The HTML <progress> Element is used to view the completion
progress of a task. While the specifics of how it's displayed is
left up to the browser developer, it's typically displayed as a
    progress bar. JavaScript can be used to manipulate the value of
    the progress bar.
"""
return el('progress', *args, **kwargs)
|
f2c41b3b3562485d21c2f1f990f2fa368627c7e7
| 30,409 |
def openssl_error():
"""Return the OpenSSL error type for use in exception clauses"""
return _OpenSSLError
|
f1782d0cdce002b2214ead438f153631afdf51ab
| 30,410 |
def load_path(path, visitor=TokenVisitor):
"""
Args:
path (str): Path to file to deserialize
visitor (type(TokenVisitor)): Visitor to use
Returns:
(list): Deserialized documents
"""
with open(path) as fh:
return deserialized(Scanner(fh), visitor)
|
e938f08c45737ec60f5258f96ea9d5f08053f99e
| 30,411 |
def get_card_names(cards):
"""
:param cards: List of card JSONs
:return: List of card names (str)
"""
names = []
for card in cards:
name = card.get("name")
names.append(name)
return names
|
a30ad1ef7d8beaab0451d6f498254b0b5df3cf6d
| 30,412 |
import platform
def pyversion(ref=None):
"""Determine the Python version and optionally compare to a reference."""
ver = platform.python_version()
if ref:
return [
int(x) for x in ver.split(".")[:2]
] >= [
int(x) for x in ref.split(".")[:2]
]
else: return ver
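# Usage sketch: with no reference the running version string is returned;
# with a reference only the major.minor components are compared.
# pyversion()       # e.g. '3.10.12'
# pyversion("3.8")  # True when running on Python 3.8 or newer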
|
2e31c7710b171ad67e56f9dbc1181685e0f32de1
| 30,413 |
def opt_tqdm(iterable):
    """
    Optional tqdm progress bars: wrap the iterable with tqdm when the package
    is available, otherwise return the iterable unchanged.
    """
    try:
        import tqdm
    except ImportError:
        return iterable
    else:
        return tqdm.tqdm(iterable)
|
5bcdde21f706f1eb3907beafc9ddeebf6891e0ec
| 30,414 |
def get_dataset_config():
"""Gets the config for dataset."""
config = config_dict.ConfigDict()
# The path to the specification of grid evaluator.
# If not specified, normal evaluator will be used.
config.grid_evaluator_spec = ''
# The directory of saved mgcdb84 dataset.
config.dataset_directory = ''
# The data types of MGCDB84 dataset to use. If specified, the training and
  # validation sets will be obtained by partitioning data of the specified type.
  # If not specified, the training and validation sets will be those of MGCDB84.
config.mgcdb84_types = ''
# The fraction of training, validation and set sets.
# Only used if mgcdb84_types is not None. Comma separated string of 3 floats.
config.train_validation_test_split = '0.6,0.2,0.2'
# The targets used for training. Defaults to mgcdb84_ref, which uses target
# values from reference values given by MCGDB84. targets can also be set to
# the exchange-correlation energies of a certain functional, which can be
# specified by an existing functional name in xc_functionals or the path to
# a json file specifying the functional form and parameters.
config.targets = 'mgcdb84_ref'
# The number of targets used for training. Default to 0 (use all targets).
config.num_targets = 0
# If True, only spin unpolarized molecules are used.
config.spin_singlet = False
# The evaluation mode for training, validation and test sets. Possible values
# are jit, onp and jnp. Comma separated string.
config.eval_modes = 'jit,onp,onp'
return config
|
52a026e7c67d09cff9eab3045508f5fb17363db6
| 30,415 |
import pathlib
import tests
import json
import yaml
def stub_multiservo_yaml(tmp_path: pathlib.Path) -> pathlib.Path:
"""Return the path to a servo config file set up for multi-servo execution."""
config_path: pathlib.Path = tmp_path / "servo.yaml"
settings = tests.helpers.BaseConfiguration()
measure_config_json = json.loads(
json.dumps(
settings.dict(
by_alias=True,
)
)
)
optimizer1 = servo.Optimizer(id="dev.opsani.com/multi-servox-1", token="123456789")
optimizer1_config_json = json.loads(
optimizer1.json(
by_alias=True,
)
)
config1 = {
"optimizer": optimizer1_config_json,
"connectors": ["measure", "adjust"],
"measure": measure_config_json,
"adjust": {}
}
optimizer2 = servo.Optimizer(id="dev.opsani.com/multi-servox-2", token="987654321")
optimizer2_config_json = json.loads(
optimizer2.json(
by_alias=True,
)
)
config2 = {
"optimizer": optimizer2_config_json,
"connectors": ["measure", "adjust"],
"measure": measure_config_json,
"adjust": {}
}
config_yaml = yaml.dump_all([config1, config2])
config_path.write_text(config_yaml)
return config_path
|
572d438f406bb1a1d8ced68be6612dced7cdca9b
| 30,416 |
import collections
def rename_internal_nodes(tree, pg_dict):
"""Rename internal nodes (add phylogroups to the name).
"""
numbers = collections.defaultdict(lambda: 0)
for node in tree.traverse("postorder"):
if node.is_leaf():
continue
pgs = node_to_pg(node, pg_dict)
pgs_s = "-".join(sorted(list(pgs), key=_sorting_key))
nname = "PG-{}_{}".format(pgs_s, numbers[pgs_s])
node.name = nname
numbers[pgs_s] += 1
return tree
|
ed77d3d4df42164919a3394fcebc8d49f5bd80eb
| 30,418 |
import math
import numpy as np
import sklearn.metrics
import torch
from tqdm import tqdm
def assign_by_euclidian_at_k(X, T, k):
"""
X : [nb_samples x nb_features], e.g. 100 x 64 (embeddings)
k : for each sample, assign target labels of k nearest points
"""
# distances = sklearn.metrics.pairwise.pairwise_distances(X)
chunk_size = 1000
num_chunks = math.ceil(len(X)/chunk_size)
distances = torch.tensor([])
for i in tqdm(range(0, num_chunks)):
chunk_indices = [chunk_size*i, min(len(X), chunk_size*(i+1))]
chunk_X = X[chunk_indices[0]:chunk_indices[1], :]
distance_mat = torch.from_numpy(sklearn.metrics.pairwise.pairwise_distances(X, chunk_X))
distances = torch.cat((distances, distance_mat), dim=-1)
assert distances.shape[0] == len(X)
assert distances.shape[1] == len(X)
distances = distances.numpy()
# get nearest points
indices = np.argsort(distances, axis = 1)[:, 1 : k + 1]
return np.array([[T[i] for i in ii] for ii in indices])
|
31033b8ddf3a2427ec98fdacecb253f6381d38d4
| 30,419 |