content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
import untangle
def siblings_list():
"""
Shows child element iteration
"""
o = untangle.parse(
"""
<root>
<child name="child1"/>
<child name="child2"/>
<child name="child3"/>
</root>
"""
)
return ",".join([child["name"] for child in o.root.child]) | 06737cb187e18c9fa8b9dc9164720e68f5fd2c36 | 8,738 |
import numpy as np
def combine_histogram(old_hist, arr):
""" Collect layer histogram for arr and combine it with old histogram.
"""
new_max = np.max(arr)
new_min = np.min(arr)
new_th = max(abs(new_min), abs(new_max))
(old_hist, old_hist_edges, old_min, old_max, old_th) = old_hist
if new_th <= old_th:
hist, _ = np.histogram(arr,
bins=len(old_hist),
range=(-old_th, old_th))
return (old_hist + hist, old_hist_edges, min(old_min, new_min),
max(old_max, new_max), old_th)
else:
old_num_bins = len(old_hist)
old_step = 2 * old_th / old_num_bins
half_increased_bins = int((new_th - old_th) // old_step + 1)
new_num_bins = half_increased_bins * 2 + old_num_bins
new_th = half_increased_bins * old_step + old_th
hist, hist_edges = np.histogram(arr,
bins=new_num_bins,
range=(-new_th, new_th))
hist[half_increased_bins:new_num_bins -
half_increased_bins] += old_hist
return (hist, hist_edges, min(old_min, new_min), max(old_max,
new_max), new_th) | bc6e6edc9531b07ed347dc0083f86ee921d77c11 | 8,740 |
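A minimal usage sketch for the helper above (illustrative only; builds the initial (hist, edges, min, max, th) tuple the same way the function expects, with numpy imported as np as in the snippet):

arr0 = np.random.randn(1000)
th0 = max(abs(float(arr0.min())), abs(float(arr0.max())))
hist0, edges0 = np.histogram(arr0, bins=64, range=(-th0, th0))
stats = (hist0, edges0, arr0.min(), arr0.max(), th0)
stats = combine_histogram(stats, np.random.randn(1000))  # bins are widened if the new batch exceeds th0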
from typing import Mapping
def unmunchify(x):
""" Recursively converts a Munch into a dictionary.
>>> b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
>>> sorted(unmunchify(b).items())
[('foo', {'lol': True}), ('hello', 42), ('ponies', 'are pretty!')]
unmunchify will handle intermediary dicts, lists and tuples (as well as
their subclasses), but ymmv on custom datatypes.
>>> b = Munch(foo=['bar', Munch(lol=True)], hello=42,
... ponies=('are pretty!', Munch(lies='are trouble!')))
>>> sorted(unmunchify(b).items()) #doctest: +NORMALIZE_WHITESPACE
[('foo', ['bar', {'lol': True}]), ('hello', 42), ('ponies', ('are pretty!', {'lies': 'are trouble!'}))]
nb. As dicts are not hashable, they cannot be nested in sets/frozensets.
"""
# Munchify x, using `seen` to track object cycles
seen = dict()
def unmunchify_cycles(obj):
# If we've already begun unmunchifying obj, just return the already-created unmunchified obj
try:
return seen[id(obj)]
except KeyError:
pass
# Otherwise, first partly unmunchify obj (but without descending into any lists or dicts) and save that
seen[id(obj)] = partial = pre_unmunchify(obj)
# Then finish unmunchifying lists and dicts inside obj (reusing unmunchified obj if cycles are encountered)
return post_unmunchify(partial, obj)
def pre_unmunchify(obj):
# Here we return a skeleton of unmunchified obj, which is enough to save for later (in case
        # we need to break cycles), but it needs to be filled out in post_unmunchify
if isinstance(obj, Mapping):
return dict()
elif isinstance(obj, list):
return type(obj)()
elif isinstance(obj, tuple):
type_factory = getattr(obj, "_make", type(obj))
return type_factory(unmunchify_cycles(item) for item in obj)
else:
return obj
def post_unmunchify(partial, obj):
# Here we finish unmunchifying the parts of obj that were deferred by pre_unmunchify because they
# might be involved in a cycle
if isinstance(obj, Mapping):
partial.update((k, unmunchify_cycles(obj[k])) for k in iterkeys(obj))
elif isinstance(obj, list):
partial.extend(unmunchify_cycles(v) for v in obj)
elif isinstance(obj, tuple):
for (value_partial, value) in zip(partial, obj):
post_unmunchify(value_partial, value)
return partial
return unmunchify_cycles(x) | 90ee373099d46ca80cf78c4d8cca885f2258bce2 | 8,741 |
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
def split_data(mapping, encoded_sequence):
    """ Function to split the prepared data into train and test sets
    Args:
        mapping (dict): dictionary mapping of all unique input characters to integers
        encoded_sequence (list): integer-encoded character sequences
    Returns:
        numpy array: train and test split numpy arrays
"""
encoded_sequence_ = np.array(encoded_sequence)
X, y = encoded_sequence_[:, :-1], encoded_sequence_[:, -1]
y = to_categorical(y, num_classes=len(mapping))
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.1, random_state=42)
return X_train, X_test, y_train, y_test | b8044b3c1686b37d4908dd28db7cbe9bff2e899a | 8,742 |
import paddle
def fsp_loss(teacher_var1_name,
teacher_var2_name,
student_var1_name,
student_var2_name,
program=None):
"""Combine variables from student model and teacher model by fsp-loss.
Args:
teacher_var1_name(str): The name of teacher_var1.
teacher_var2_name(str): The name of teacher_var2. Except for the
second dimension, all other dimensions should
be consistent with teacher_var1.
student_var1_name(str): The name of student_var1.
student_var2_name(str): The name of student_var2. Except for the
second dimension, all other dimensions should
be consistent with student_var1.
program(Program): The input distiller program. If not specified,
the default program will be used. Default: None
Returns:
Variable: fsp distiller loss.
"""
    if program is None:
program = paddle.static.default_main_program()
teacher_var1 = program.global_block().var(teacher_var1_name)
teacher_var2 = program.global_block().var(teacher_var2_name)
student_var1 = program.global_block().var(student_var1_name)
student_var2 = program.global_block().var(student_var2_name)
teacher_fsp_matrix = paddle.fluid.layers.fsp_matrix(teacher_var1,
teacher_var2)
student_fsp_matrix = paddle.fluid.layers.fsp_matrix(student_var1,
student_var2)
fsp_loss = paddle.mean(
paddle.nn.functional.square_error_cost(student_fsp_matrix,
teacher_fsp_matrix))
return fsp_loss | b8937a64ec8f5e215128c61edee522c9b2cd83d7 | 8,743 |
import numpy as np
def diff_numpy_array(A, B):
"""
Numpy Array A - B
return items in A that are not in B
By Divakar
https://stackoverflow.com/a/52417967/1497443
"""
return A[~np.in1d(A, B)] | 72139ba49cf71abd5ea60772143c26f384e0e171 | 8,744 |
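A one-line usage sketch (illustrative):

diff_numpy_array(np.array([1, 2, 3, 4]), np.array([2, 4]))  # -> array([1, 3])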
def _find_data_between_ranges(data, ranges, top_k):
"""Finds the rows of the data that fall between each range.
Args:
        data (pd.Series): The predicted probability values for the positive class.
ranges (list): The threshold ranges defining the bins. Should include 0 and 1 as the first and last value.
top_k (int): The number of row indices per bin to include as samples.
Returns:
list(list): Each list corresponds to the row indices that fall in the range provided.
"""
results = []
for i in range(1, len(ranges)):
mask = data[(data >= ranges[i - 1]) & (data < ranges[i])]
if top_k != -1:
results.append(mask.index.tolist()[: min(len(mask), top_k)])
else:
results.append(mask.index.tolist())
return results | 323986cba953a724f9cb3bad8b2522fc711529e5 | 8,746 |
def validar_entero_n():
    """
    Prompt for an integer n; return it, or False if the input is not a valid number.
    """
    try:
        n = int(input('n= '))  # the program also handles a float-like entry: the error is caught below
    except ValueError:
        print('Invalid number')
return False
else:
return n | a1238025fd2747c597fc2adf34de441ae6b8055d | 8,747 |
def Conv_Cifar10_32x64x64():
"""A 3 hidden layer convnet designed for 32x32 cifar10."""
base_model_fn = _cross_entropy_pool_loss([32, 64, 64],
jax.nn.relu,
num_classes=10)
datasets = image.cifar10_datasets(batch_size=128)
return _ConvTask(base_model_fn, datasets) | e41e2f0da80f8822187a2ee82dcfe6f70e324213 | 8,748 |
from typing import List
def rotate(angle_list: List, delta: float) -> List:
"""Rotates a list of angles (wraps around at 2 pi)
Args:
angle_list (List): list of angles in pi radians
delta (float): amount to change in pi radians
Returns:
List: new angle list in pi radians
"""
new_angle_list = []
for angle in angle_list:
new_angle = angle + delta
if new_angle >= 2.0:
new_angle -= 2.0
new_angle_list.append(new_angle)
new_angle_list.sort()
return new_angle_list | 560c5138486bd3e67ad956fb2439236a3e3886cc | 8,749 |
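A short usage sketch showing the wrap-around at 2.0 (i.e. 2 pi radians; illustrative):

rotate([0.5, 1.9], 0.2)  # -> approximately [0.1, 0.7]; 2.1 wraps back below 2.0 and the result is sorted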
def global_average_pooling_3d(tensor: TorchTensorNCX) -> TorchTensorNCX:
"""
3D Global average pooling.
Calculate the average value per sample per channel of a tensor.
Args:
tensor: tensor with shape NCDHW
Returns:
a tensor of shape NC
"""
assert len(tensor.shape) == 5, 'must be a NCDHW tensor!'
return F.avg_pool3d(tensor, tensor.shape[2:]).squeeze(2).squeeze(2).squeeze(2) | 27a73d29fd9dd63b461f2275ed2941bf6bd83348 | 8,750 |
def get_LAB_L_SVD_s(image):
"""Returns s (Singular values) SVD from L of LAB Image information
Args:
image: PIL Image or Numpy array
Returns:
vector of singular values
Example:
>>> from PIL import Image
>>> from ipfml.processing import transform
>>> img = Image.open('./images/test_img.png')
>>> s = transform.get_LAB_L_SVD_s(img)
>>> len(s)
200
"""
L = get_LAB_L(image)
return compression.get_SVD_s(L) | 50a4bd4e4a8b3834baa3aca1f5f1e635baa7a145 | 8,751 |
def path_inclusion_filter_fn(path, param, layer):
"""Returns whether or not layer name is contained in path."""
return layer in path | c93aa83e67c600cd83d053d50fbeaee4f7eebf94 | 8,752 |
from typing import Tuple
def _parse_feature(line: PipelineRecord) -> Tuple[str, str, Coordinates, Feature]:
""" Creates a Feature from a line of output from a CSVReader """
contig = line[0]
coordinates = parse_coordinates(line[1])
feature = line[2]
# Piler-cr and BLAST both use 1-based indices, but Opfi uses 0-based indices.
# To make both coordinate systems consistent, we subtract 1 from the start
# since feature coordinates come directly from those tools.
# If features are on the reverse strand, the second coordinate will be larger
# than the first, but operon_analyzer assumes the start is always less than the
# end
first_coord, second_coord = parse_coordinates(line[3])
feature_start = min(first_coord, second_coord) - 1
feature_end = max(first_coord, second_coord)
query_orfid = line[4]
strand = int(line[5]) if line[5] else (1 if feature_start < feature_end else -1)
hit_accession = line[6]
hit_eval = float(line[7]) if line[7] else None
description = line[8]
sequence = line[9]
if len(line) > 10:
bit_score = float(line[10]) if line[10] != '' else None
raw_score = int(line[11]) if line[11] != '' else None
aln_len = int(line[12]) if line[12] != '' else None
pident = float(line[13]) if line[13] != '' else None
nident = int(line[14]) if line[14] != '' else None
mismatch = int(line[15]) if line[15] != '' else None
positive = int(line[16]) if line[16] != '' else None
gapopen = int(line[17]) if line[17] != '' else None
gaps = int(line[18]) if line[18] != '' else None
ppos = float(line[19]) if line[19] != '' else None
qcovhsp = int(line[20]) if line[20] != '' else None
contig_filename = line[21] if line[21] else ''
else:
bit_score = None
raw_score = None
aln_len = None
pident = None
nident = None
mismatch = None
positive = None
gapopen = None
gaps = None
ppos = None
qcovhsp = None
contig_filename = None
return contig, contig_filename, coordinates, Feature(
feature,
(feature_start, feature_end),
query_orfid,
strand,
hit_accession,
hit_eval,
description,
sequence,
bit_score,
raw_score,
aln_len,
pident,
nident,
mismatch,
positive,
gapopen,
gaps,
ppos,
qcovhsp) | 201f9c6ed5cd618fc63ec5e07a5b99977f4ef2b0 | 8,753 |
import numpy as np
import pandas as pd
def average_summary_df_tasks(df, avg_columns):
""" Create averages of the summary df across tasks."""
new_df = []
# Columns to have after averaging
keep_cols = ["dataset", "method_name", "trial_number"]
subsetted = df.groupby(keep_cols)
for subset_indices, subset_df in subsetted:
return_dict = {}
return_dict.update(dict(zip(keep_cols, subset_indices)))
for column in avg_columns:
task_values = subset_df[column].values
min_length = min([len(i) for i in task_values])
new_task_values = []
for j in task_values:
j = np.array(j)
if len(j) > min_length:
percentiles = np.linspace(0, len(j) - 1, min_length).astype(int)
new_task_values.append(j[percentiles])
else:
new_task_values.append(j)
avg_task = np.mean(np.array(new_task_values), axis=0).tolist()
return_dict[column] = avg_task
new_df.append(return_dict)
return pd.DataFrame(new_df) | 9c506132cc406a91979777255c092db20d786d12 | 8,754 |
def ml_variance(values, mean):
"""
Given a list of values assumed to come from a normal distribution and
their maximum likelihood estimate of the mean, compute the maximum
likelihood estimate of the distribution's variance of those values.
There are many libraries that do something like this, but they
likely don't do exactly what you want, so you should not use them
directly. (And to be clear, you're not allowed to use them.)
"""
# Your code here
return 1.0 | 440d8d2d2f0a5ed40e01e640aadafb83f16ee14b | 8,755 |
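The body above is intentionally left as a stub. As an illustration only (not the author's solution), the maximum likelihood estimate of a normal distribution's variance is the mean squared deviation from the ML mean:

def ml_variance_sketch(values, mean):
    # MLE of sigma^2: (1/n) * sum((x - mean)^2), i.e. the biased estimator, not the (n - 1) sample variance
    return sum((x - mean) ** 2 for x in values) / len(values)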
def add_landmarks(particle, d, angle):
"""
Adds a set of landmarks to the particle. Only used on first SLAM cycle
when no landmarks have been added.
:param particle: The particle to be updated
:param d: An array of distances to the landmarks
:param angle: An array of observation angles for the landmarks
:return: Returns the updated particle with landmarks added
"""
# Evaluate sine and cosine values for each observation in z
s = np.sin(pi_2_pi(particle.x[2, 0] + angle))
c = np.cos(pi_2_pi(particle.x[2, 0] + angle))
# Add new landmark locations to mu
particle.mu = np.vstack((particle.mu, np.array(
[particle.x[0, 0] + d * c,
particle.x[1, 0] + d * s]).T))
# Distance values
dpos = np.zeros((len(d), 2))
dpos[:, 0] = d * c # dx
dpos[:, 1] = d * s # dy
d_sq = dpos[:, 0]**2 + dpos[:, 1]**2
d = np.sqrt(d_sq)
H = calc_H(particle, dpos, d_sq, d)
# Add covariance matrices for landmarks
particle.sigma = np.vstack((particle.sigma,
np.linalg.inv(H) @ Q
@ np.linalg.inv(H.transpose((0, 2, 1)))))
particle.i = np.append(particle.i, np.full(len(d), 1))
return particle | d1d168e48f62f60d58e57a79223793108d50dac9 | 8,756 |
def walk_forward_val_multiple(model, ts_list,
history_size=HISTORY_SIZE,
target_size=TARGET_SIZE) -> float:
"""
Conduct walk-forward validation for all states, average the results.
Parameters
----------
model -- The model to be validated
ts_list {list | np.ndarray} -- Array of time series vector
history_size {int} -- The window to use for model input
target_size {int} -- The target prediction window size
Returns
-------
'mse' {float} -- The weighted average MSE across all the states (weighted
by length of time series)
"""
total_error = 0.
total_steps = 0
for ts in ts_list:
mse_state, n_preds = walk_forward_val(model, ts,
history_size=history_size,
target_size=target_size,
return_count=True)
total_error += mse_state * n_preds
total_steps += n_preds
return total_error / total_steps | b3f73ceeddb720fdc7c7d9470a49bccc3c21f81b | 8,757 |
def inverse_project_lambert_equal_area(pt):
"""
Inverse Lambert projections
Parameters:
pt: point, as a numpy array
"""
X = pt[0]
Y = pt[1]
f = np.sqrt(1.0-(X**2.0+Y**2.0)/4)
return tensors.Vector([f*X,f*Y,-1.0+(X**2.0+Y**2.0)/2]) | f8ab5fb44d2d271a8da13623273d8d687d38b772 | 8,759 |
import dataclasses
import marshmallow
def _get_field_default(field: dataclasses.Field):
"""
Return a marshmallow default value given a dataclass default value
>>> _get_field_default(dataclasses.field())
<marshmallow.missing>
"""
# Remove `type: ignore` when https://github.com/python/mypy/issues/6910 is fixed
default_factory = field.default_factory # type: ignore
if default_factory is not dataclasses.MISSING:
return default_factory
elif field.default is dataclasses.MISSING:
return marshmallow.missing
return field.default | 0c45e55a1c14cb6b47365ef90cb68e517342dbbc | 8,760 |
from typing import List
from typing import Tuple
from sqlalchemy import select  # SQLAlchemy Core select; the stdlib select module is not callable like this
def get_all_votes(poll_id: int) -> List[Tuple[str, int]]:
"""
Get all votes for the current poll_id that are stored in the database
Args:
poll_id (int): Telegram's `message_id` for the poll
Returns:
List[Tuple[str, int]]: A list with the current votes in tuples (user, votes)
"""
postgres: Database = get_database()
select_query = (
select([postgres.motos_counter.c.username, postgres.motos_counter.c.vote])
.where(postgres.motos_counter.c.poll_id == poll_id)
.order_by(postgres.motos_counter.c.vote, postgres.motos_counter.c.date)
)
results = postgres.engine.execute(select_query)
return [(row["username"], row["vote"]) for row in results] | cf0ad8ee700a0da70bf29d53d08ab71e08c941ea | 8,762 |
def getUnitConversion():
"""
Get the unit conversion from kT to kJ/mol
Returns
factor: The conversion factor (float)
"""
temp = 298.15
factor = Python_kb/1000.0 * temp * Python_Na
return factor | cb7b33231a53a68358713ce65137cbf13a397923 | 8,763 |
import numpy as np
def find_where_and_nearest(array, value):
"""
Returns index and array[index] where value is closest to an array element.
"""
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx, array[idx] | a34ac1d59c8093989978fbca7c2409b241cedd5b | 8,764 |
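Usage sketch (illustrative):

find_where_and_nearest([0.0, 0.5, 1.0], 0.6)  # -> (1, 0.5)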
import numpy
def twoexpdisk(R,phi,z,glon=False,
params=[1./3.,1./0.3,1./4.,1./0.5,logit(0.1)]):
"""
NAME:
twoexpdisk
PURPOSE:
density of a sum of two exponential disks
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,1/hR2,1/hz2,logit(amp2)]
OUTPUT:
density or log density
HISTORY:
2015-03-24 - Written - Bovy (IAS)
"""
amp= ilogit(params[4])
return (1.-amp)/2.*numpy.fabs(params[1])\
*numpy.exp(-params[0]*(R-_R0)-params[1]*numpy.fabs(z))\
+amp/2.*params[3]*numpy.exp(-params[2]*(R-_R0)-params[3]*numpy.fabs(z)) | bf8c5e0afa28e715846401274941e281a8731f24 | 8,765 |
from sklearn.cluster import KMeans
from sklearn import metrics
def sc(X):
    """Silhouette Coefficient: choose the number of clusters with the best average silhouette score."""
    global best_k
    score_list = []  # stores [K, average silhouette score] for each candidate K
    silhouette_int = -1  # running best average silhouette score
    for n_clusters in range(3, 10):  # iterate over a small, finite set of candidate cluster counts
        model_kmeans = KMeans(n_clusters=n_clusters, random_state=0)  # build the clustering model
        cluster_labels_tmp = model_kmeans.fit_predict(X)  # fit the model and predict cluster labels
        silhouette_tmp = metrics.silhouette_score(X, cluster_labels_tmp)  # average silhouette score for this K
        score_list.append([n_clusters, silhouette_tmp])  # record K and its score
        if silhouette_tmp > silhouette_int:  # if the average silhouette score improved
            best_k = n_clusters  # remember the best K
            silhouette_int = silhouette_tmp  # remember the best score
            # best_kmeans = model_kmeans  # keep the best model
            # cluster_labels_k = cluster_labels_tmp  # keep the best cluster labels
return best_k | c2898e115db04c1f1ac4d6a7f8c583ea0a8b238e | 8,766 |
import socket
import time
def is_tcp_port_open(host: str, tcp_port: int) -> bool:
"""Checks if the TCP host port is open."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2) # 2 Second Timeout
try:
sock.connect((host, tcp_port))
sock.shutdown(socket.SHUT_RDWR)
except ConnectionRefusedError:
return False
except socket.timeout:
return False
finally:
sock.close()
# Other errors are propagated as odd exceptions.
# We shutdown and closed the connection, but the server may need a second
# to start listening again. If the following error is seen, this timeout
# should be increased. 300ms seems to be the minimum.
#
# Connecting to J-Link via IP...FAILED: Can not connect to J-Link via \
# TCP/IP (127.0.0.1, port 19020)
time.sleep(0.5)
return True | cbe4d0ae58610b863c30b4e1867b47cb1dbdfc3d | 8,767 |
from typing import Callable
from typing import Any
import itertools
def recursive_apply_dict(node: dict, fn: Callable) -> Any:
"""
Applies `fn` to the node, if `fn` changes the node,
the changes should be returned. If the `fn` does not change the node,
it calls `recursive_apply` on the children of the node.
In case the recursion on the children results in one or more
`runtool.datatypes.Versions` objects, the cartesian product of these
versions is calculated and a new `runtool.datatypes.Versions` object will be
returned containing the different versions of this node.
"""
# else merge children of type Versions into a new Versions object
expanded_children = []
new_node = {}
for key, value in node.items():
child = recursive_apply(value, fn)
# If the child is a Versions object, map the key to all its versions,
# child = Versions([1,2]),
# key = ['a']
# ->
# (('a':1), ('a':2))
if isinstance(child, Versions):
expanded_children.append(itertools.product([key], child))
else:
new_node[key] = child
if expanded_children:
# example:
# expanded_children = [(('a':1), ('a':2)), (('b':1), ('b':2))]
# new_node = {"c": 3}
# results in:
# [
# {'a':1, 'b':1, 'c':3},
# {'a':1, 'b':2, 'c':3},
# {'a':2, 'b':1, 'c':3},
# {'a':3, 'b':2, 'c':3},
# ]
new_node = [
fn(
dict(version_of_node, **new_node)
) # apply fn to the new version of the node
for version_of_node in itertools.product(*expanded_children)
]
# if the current node generated Versions object, these
# need to be flattened as well. For example:
# new_node = [Versions([1,2]), Versions([3,4])]
# results in
# Versions([[1,3], [1,4], [2,3], [2,4]])
if all(isinstance(val, Versions) for val in new_node):
return Versions(list(*itertools.product(*new_node)))
return Versions(new_node)
return fn(new_node) | c40daa68caaea02d16511fcc1cd3ee1949c73633 | 8,768 |
import numpy as np
import six
from PIL import Image
def encode_image_array_as_jpg_str(image):
"""Encodes a numpy array into a JPEG string.
Args:
image: a numpy array with shape [height, width, 3].
Returns:
JPEG encoded image string.
"""
image_pil = Image.fromarray(np.uint8(image))
output = six.BytesIO()
image_pil.save(output, format='JPEG')
jpg_string = output.getvalue()
output.close()
return jpg_string | 4c2d27c15c6979678a1c9619a347b7aea5718b2c | 8,769 |
def minify_response(response):
"""Minify response to save bandwith."""
if response.mimetype == u'text/html':
data = response.get_data(as_text=True)
response.set_data(minify(data, remove_comments=True,
remove_empty_space=True,
reduce_boolean_attributes=True))
return response | 29a942d870636337eaf0d125ba6b2ca9945d1d1c | 8,770 |
def get_shorturlhash(myurl):
"""Returns a FNV1a hash of the UNquoted version of the passed URL."""
x = get_hash(unquote(myurl))
return x | f61ef1cfe14fc69a523982888a7b1082244b7bd5 | 8,771 |
def filter_privacy_level(qs, clearance_level, exact=False):
"""
Function to exclude objects from a queryset, which got a higher clearance
level than the wanted maximum clearance level.
:qs: Django queryset.
:clearance_level: Minimum clearance level.
:exact: Boolean to check for the exact clearance level.
"""
if not qs:
return qs
c_type = ContentType.objects.get_for_model(qs.model)
kwargs = {
'content_type': c_type,
'object_id__in': qs.values_list('pk'),
'level__clearance_level{}'.format(
'' if exact else '__gt'): clearance_level,
}
private_objects = PrivacySetting.objects.filter(**kwargs).values_list(
'object_id')
if exact:
return qs.filter(pk__in=private_objects)
return qs.exclude(pk__in=private_objects) | a5fd864b3a9efd86bf40e0a3b966edb047979b2a | 8,772 |
def get_configuration_store(name=None,resource_group_name=None,opts=None):
"""
Use this data source to access information about an existing App Configuration.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.appconfiguration.get_configuration_store(name="existing",
resource_group_name="existing")
pulumi.export("id", example.id)
```
:param str name: The Name of this App Configuration.
:param str resource_group_name: The name of the Resource Group where the App Configuration exists.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:appconfiguration/getConfigurationStore:getConfigurationStore', __args__, opts=opts).value
return AwaitableGetConfigurationStoreResult(
endpoint=__ret__.get('endpoint'),
id=__ret__.get('id'),
location=__ret__.get('location'),
name=__ret__.get('name'),
primary_read_keys=__ret__.get('primaryReadKeys'),
primary_write_keys=__ret__.get('primaryWriteKeys'),
resource_group_name=__ret__.get('resourceGroupName'),
secondary_read_keys=__ret__.get('secondaryReadKeys'),
secondary_write_keys=__ret__.get('secondaryWriteKeys'),
sku=__ret__.get('sku'),
tags=__ret__.get('tags')) | 4c0baa2cdd089439f1a53415ff9679568a097094 | 8,773 |
def _linear(args, output_size, bias, scope=None, use_fp16=False):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
      scope: VariableScope for the created subgraph; defaults to "Linear".
      use_fp16: boolean, whether to create the variables in float16.
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape().as_list() for a in args]
for shape in shapes:
if len(shape) != 2:
raise ValueError(
"Linear is expecting 2D arguments: %s" % str(shapes))
if not shape[1]:
raise ValueError(
"Linear expects shape[1] of arguments: %s" % str(shapes))
else:
total_arg_size += shape[1]
dtype = [a.dtype for a in args][0]
# Now the computation.
with vs.variable_scope(scope or "Linear"):
matrix = _variable_on_cpu('Matrix', [total_arg_size, output_size],
use_fp16=use_fp16)
if use_fp16:
dtype = tf.float16
else:
dtype = tf.float32
args = [tf.cast(x, dtype) for x in args]
if len(args) == 1:
res = math_ops.matmul(args[0], matrix)
else:
res = math_ops.matmul(array_ops.concat(args, 1), matrix)
if not bias:
return res
bias_term = _variable_on_cpu('Bias', [output_size],
tf.constant_initializer(0),
use_fp16=use_fp16)
return res + bias_term | d8daafaf1dfab0bc6425aef704543833bfbf731a | 8,774 |
def find_CI(method, samples, weights=None, coverage=0.683,
logpost=None, logpost_sort_idx=None,
return_point_estimate=False, return_coverage=False,
return_extras=False, options=None):
"""Compute credible intervals and point estimates from samples.
Arguments
---------
method : str
Method to compute CI. Options are "PJ-HPD", "tail CI", "std", and
"HPD".
PJ-HPD: Compute the CI from the joint posterior HPD region such that
the projected range of the HPDR has coverage ``coverage``.
See Joachimi et al. 2020.
The point estimate is the joint posterior MAP.
tail CI: This is the usual quantile CI. I.e., for CI (l,u) and
coverage c, P(x<l) = (1-c)/2 and P(x>u) = 1-(1-c)/2.
The point estimate is the median.
std: Compute the CI as (mean - n_sigma*std, mean + n_sigma*std).
``n_sigma`` is the number of standard devations that cover
``coverage`` in a normal distribution.
The point estimate is the mean.
HPD: Compute the HPDI of the samples.
The point estimate is the MAP.
samples : array
Samples to use.
weights : array, optional
Sample weights.
coverage : float, optional
Target coverage. This gets converted into sigmas. Default: 0.683.
logpost : array, optional
Array of the log posterior values of the samples. Required for method
``PJ-HPD``.
logpost_sort_idx : array, optional
Array of indices that sort the samples in descending posterior value.
If method is ``PJ-HPD`` and it is not provided, this will be computed
internally from logpost.
return_point_estimate : bool, optional
Whether to return the point_estimate.
return_coverage : bool, optional
Whether to return the actual coverage of the CI.
options : dict, optional
Additional options passed to the CI methods.
Returns
-------
(l, u) : tuple
Credible interval of the samples.
p : float
Point estimate. Only returned if return_point_estimate is true.
coverage : float
The achieved coverage of the returned CI.
"""
options = options or {}
extras = None
if method.lower() == "pj-hpd" or method.lower() == "projected joint hpd":
if logpost is None and logpost_sort_idx is None:
raise ValueError("For method PJ-HPD, either logpost or "
"logpost_sort_idx need to be specified.")
CI, MAP, alpha, n_sample = find_projected_joint_HPDI(
samples, weights,
coverage_1d_threshold=coverage,
sort_idx=logpost_sort_idx,
log_posterior=logpost,
return_map=True, return_coverage_1d=True,
return_n_sample=True,
**options)
point_estimate = MAP
extras = n_sample
elif method.lower() == "hpd" or method.lower() == "m-hpd":
CI, marg_MAP, alpha, no_constraints = find_marginal_HPDI(
samples, weights,
coverage=coverage,
return_map=True,
return_coverage=True,
check_prior_edges=True,
**options)
point_estimate = marg_MAP
extras = no_constraints
elif method.lower() == "tail ci" or method.lower() == "quantile ci":
CI, marg_median, alpha = find_quantile_CI(
samples, weights,
coverage=coverage,
return_median=True, return_coverage=True)
point_estimate = marg_median
elif method.lower() == "std":
CI, marg_mean, alpha = find_std_CI(
samples, weights, coverage=coverage,
return_mean=True, return_coverage=True)
point_estimate = marg_mean
else:
raise NotImplementedError(f"Method {method} not supported.")
result = [CI]
if return_point_estimate:
result += [point_estimate]
if return_coverage:
result += [alpha]
if return_extras:
result += [extras]
if len(result) == 1:
# Only CI
return result[0]
else:
return tuple(result) | 6b5ab3ac47f4f4a0251862946336948fd2ff66ed | 8,775 |
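The helpers called above (find_projected_joint_HPDI, find_marginal_HPDI, find_quantile_CI, find_std_CI) are not shown in this snippet. As an illustration only of the "tail CI" idea described in the docstring, a weighted quantile interval could be sketched roughly as follows (hypothetical helper, not the module's find_quantile_CI):

import numpy as np

def weighted_quantile_ci_sketch(samples, weights=None, coverage=0.683):
    # Interpolate the weighted empirical CDF at (1-c)/2, 0.5 and 1-(1-c)/2.
    samples = np.asarray(samples, dtype=float)
    weights = np.ones_like(samples) if weights is None else np.asarray(weights, dtype=float)
    order = np.argsort(samples)
    s, w = samples[order], weights[order]
    cdf = np.cumsum(w) / np.sum(w)
    lo, hi = (1.0 - coverage) / 2.0, 1.0 - (1.0 - coverage) / 2.0
    l, median, u = np.interp([lo, 0.5, hi], cdf, s)
    return (l, u), median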
import numpy as np
import pandas as pd
def load_csv(filename, fields=None, y_column=None, sep=','):
""" Read the csv file."""
input = pd.read_csv(filename, skipinitialspace=True,
usecols=fields, sep=sep, low_memory=False)
input = input.dropna(subset=fields)
# dtype={"ss_list_price": float, "ss_wholesale_cost": float}
input_data = input.values
data = DataSource()
    if y_column is None:
data.features = input_data[:, :-1]
data.labels = input_data[:, -1]
data.headers = input.keys()
else:
data.features = np.delete(
input_data, [y_column], axis=1) # input_data[:, :-1]
data.labels = input_data[:, y_column]
headers = np.array(input.keys())
data.headers = list(np.delete(headers, [y_column]))
data.headers.append(input.keys()[y_column])
# print(data.headers)
try:
data.file = filename.split("/")[-1]
except Exception:
data.file = filename
return data | 126b96e94f4a5ab201460b427828807cf31eb6ae | 8,776 |
def Normalize(array):
"""Normalizes numpy arrays into scale 0.0 - 1.0"""
array_min, array_max = array.min(), array.max()
return ((array - array_min)/(array_max - array_min)) | a8f3bae56f8e17aed80f8e41030d049a69ac8cae | 8,777 |
def obtener_cantidad_anualmente(PaisDestino, AnioInicio, AnioFin):
"""
    Get the number of incoming flights per year for a destination country over a range of years.
    Returns the total number of incoming flights for each year.
    :param PaisDestino: Country the flights arrive in
    :type PaisDestino: str
    :param AnioInicio: Start year
    :type AnioInicio: int
    :param AnioFin: End year
:type AnioFin: int
:rtype: Dict[str, int]
"""
conversor = Conversor()
repository = DBRepository()
cursor, labels = repository.ObtenerDatosVuelosEntrantesAenaDadoPaisDestinoAnioMinMax(PaisDestino, AnioInicio, AnioFin)
arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
    ## Build the extended JSON response
matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels)
retval = conversor.ObtenerDataJSONExtendido(matriz)
return retval | 8364a08f4b42124b70a7769dc6a9649cdd9841d7 | 8,778 |
def calculate_shap_for_test(training_data, y, pipeline, n_points_to_explain):
"""Helper function to compute the SHAP values for n_points_to_explain for a given pipeline."""
points_to_explain = training_data[:n_points_to_explain]
pipeline.fit(training_data, y)
return _compute_shap_values(pipeline, pd.DataFrame(points_to_explain), training_data) | d8a88b3c9af05a8274a0cca1a0e63c3a9faaa8d0 | 8,779 |
def read_num_write(input_string):
""" read in the number of output files
"""
pattern = ('NumWrite' +
one_or_more(SPACE) + capturing(INTEGER))
block = _get_training_data_section(input_string)
keyword = first_capture(pattern, block)
assert keyword is not None
return keyword | 0ee1a9ac178eb4c49a01a36208e4c59d6b9023bc | 8,780 |
import requests
import json
def stock_zh_a_minute(symbol: str = 'sh600751', period: str = '5', adjust: str = "") -> pd.DataFrame:
"""
    Minute-level historical quote data for stocks and stock indices
    http://finance.sina.com.cn/realstock/company/sh600519/nc.shtml
    :param symbol: sh000300
    :type symbol: str
    :param period: bar size in minutes; one of 1, 5, 15, 30, 60
    :type period: str
    :param adjust: "" (default): unadjusted data; qfq: forward-adjusted data; hfq: backward-adjusted data
:type adjust: str
:return: specific data
:rtype: pandas.DataFrame
"""
url = "https://quotes.sina.cn/cn/api/jsonp_v2.php/=/CN_MarketDataService.getKLineData"
params = {
"symbol": symbol,
"scale": period,
"datalen": "1023",
}
r = requests.get(url, params=params)
temp_df = pd.DataFrame(json.loads(r.text.split('=(')[1].split(");")[0])).iloc[:, :6]
try:
stock_zh_a_daily(symbol=symbol, adjust="qfq")
except:
return temp_df
if adjust == "":
return temp_df
if adjust == "qfq":
temp_df[["date", "time"]] = temp_df["day"].str.split(" ", expand=True)
need_df = temp_df[temp_df["time"] == "15:00:00"]
need_df.index = need_df["date"]
stock_zh_a_daily_qfq_df = stock_zh_a_daily(symbol=symbol, adjust="qfq")
result_df = stock_zh_a_daily_qfq_df.iloc[-len(need_df):, :]["close"].astype(float) / need_df["close"].astype(float)
temp_df.index = pd.to_datetime(temp_df["date"])
merged_df = pd.merge(temp_df, result_df, left_index=True, right_index=True)
merged_df["open"] = merged_df["open"].astype(float) * merged_df["close_y"]
merged_df["high"] = merged_df["high"].astype(float) * merged_df["close_y"]
merged_df["low"] = merged_df["low"].astype(float) * merged_df["close_y"]
merged_df["close"] = merged_df["close_x"].astype(float) * merged_df["close_y"]
temp_df = merged_df[["day", "open", "high", "low", "close", "volume"]]
temp_df.reset_index(drop=True, inplace=True)
return temp_df
if adjust == "hfq":
temp_df[["date", "time"]] = temp_df["day"].str.split(" ", expand=True)
need_df = temp_df[temp_df["time"] == "15:00:00"]
need_df.index = need_df["date"]
stock_zh_a_daily_qfq_df = stock_zh_a_daily(symbol=symbol, adjust="hfq")
result_df = stock_zh_a_daily_qfq_df.iloc[-len(need_df):, :]["close"].astype(float) / need_df["close"].astype(float)
temp_df.index = pd.to_datetime(temp_df["date"])
merged_df = pd.merge(temp_df, result_df, left_index=True, right_index=True)
merged_df["open"] = merged_df["open"].astype(float) * merged_df["close_y"]
merged_df["high"] = merged_df["high"].astype(float) * merged_df["close_y"]
merged_df["low"] = merged_df["low"].astype(float) * merged_df["close_y"]
merged_df["close"] = merged_df["close_x"].astype(float) * merged_df["close_y"]
temp_df = merged_df[["day", "open", "high", "low", "close", "volume"]]
temp_df.reset_index(drop=True, inplace=True)
return temp_df | b54f7dce68e102ebfa6c1784de5ebd49fcb405cb | 8,781 |
import random
def randomize_case(s: str) -> str:
"""Randomize string casing.
Parameters
----------
s : str
Original string
Returns
-------
str
String with it's letters in randomized casing.
"""
result = "".join(
[c.upper() if random.randint(0, 1) == 1 else c.lower() for c in s]
)
# If result contains letters and the result is same as original try again.
if UNICODE_LETTERS_RE.search(s) is not None and result == s:
return randomize_case(s)
else:
return result | 5e00ce336e2886a0d3bd52bc033b02560f0fb9ae | 8,782 |
import speedtest
def _get_results():
"""Run speedtest with speedtest.py"""
s = speedtest.Speedtest()
print("Testing download..")
s.download()
print("Testing upload..")
s.upload()
return s.results.ping, s.results.download, s.results.upload | 7092a5aa7200ebc93e266dbd6b7885095b0433bb | 8,783 |
def findCursor(query, keyname, page_no, page_size):
"""Finds the cursor to use for fetching results from the given page.
We store a mapping of page_no->cursor in memcache. If this result is missing, we look for page_no-1, if that's
missing we look for page_no-2 and so on. Once we've found one (or we get back to page_no=0) then we need to fetch
results from that page forward, storing the results back in memcache as we go.
Args:
query: A query used to fetch data from the data store
keyname: A string that'll make the keys unique (e.g. all blog posts could have keyname='blog'
page_no: The page number we're after
page_size: The size of pages we're after"""
cursor_page = page_no
cursor = memcache.get('post-page-cursor:%s:%d:%d' % (keyname, cursor_page, page_size))
while not cursor:
cursor_page -= 1
if cursor_page == 0:
break
cursor = memcache.get('post-page-cursor:%s:%d:%d' % (keyname, cursor_page, page_size))
while cursor_page < page_no:
# if we have to fast-forward through pages then we'll store the pages in memcache as we go
if cursor_page == 0:
it = query.run()
else:
it = query.with_cursor(cursor)
n = 0
for _ in it:
n += 1
if n >= page_size:
break
cursor = query.cursor()
cursor_page += 1
memcache.set('post-page-cursor:%s:%d:%d' % (keyname, cursor_page, page_size), cursor)
return cursor | 9af3368ef0011d7c6c9758f57bc2c956d540f675 | 8,784 |
def _get_seq(window,variants,ref,genotypeAware):
"""
Using the variation in @variants, construct two haplotypes, one which
contains only homozygous variants, the other which contains both hom and het variants
by placing those variants into the reference base string
@param variants: A vcf_eval.ChromVariants object
@param low: the starting position
@param high: the ending position
@param ref: a parsers.genome object
@param loc: the location that we are trying to rescue
@param genotype: whether to phase hets onto their own sequence to check for genotype accuracy (if there are multiple and they don't overlap, phasing doesn't matter)
@return: a tuple of sequences of bases that comes from modifying the reference sequence with the variants
"""
low = window[0]
high = window[1]
hetChunks = []
homChunks = []
hetOffset = low
homOffset = low
# note: if genotypeAware is False, the het chunks/offset will not be used
def get_ref_bases(start,end):
"""VCF parser is 1-based, but genome is 0-based."""
return ref.ref(window[2],start-1,end-1)
def add_ref_bases_until(chunks,begin,end):
chunks.append(get_ref_bases(begin,end))
def add_alt(chunk,start,var):
add_ref_bases_until(chunk,start,var.pos)
chunk.append(var.alt[0])
for variant in variants:
loc = variant.pos
#print((variant.ref, get_ref_bases(variant.pos,variant.pos+len(variant.ref))))
verifyRefBases = get_ref_bases(variant.pos,variant.pos+len(variant.ref))
if ( variant.ref != verifyRefBases ):
raise RescueError("Variant ref does not match reference at " + window[2] + " " + str(loc) + ": " +variant.ref + " != " + verifyRefBases )
if not ( hetOffset <= loc and homOffset <= loc ):
raise RescueError("Attempted to rescue sequence containing overlapping variants around " + window[2] + " " + str(loc))
assert variant.genotype_type != GENOTYPE_TYPE.HOM_REF
assert variant.genotype_type != GENOTYPE_TYPE.NO_CALL
if ( (not genotypeAware) or variant.genotype_type == GENOTYPE_TYPE.HOM_VAR):
add_alt(homChunks,homOffset,variant)
homOffset = len(variant.ref) + loc
else: # ( variant.genotype_type == GENOTYPE_TYPE.HET )
add_alt(hetChunks,hetOffset,variant)
hetOffset = len(variant.ref) + loc
# NB: this check seems redundant with the assert after it
if ( hetOffset > high or homOffset > high ):
print("-----fail-----")
print(window)
print(map(str,variants))
print((homOffset,high))
assert hetOffset <= high and homOffset <= high
if ( genotypeAware ):
add_ref_bases_until(hetChunks,hetOffset,high)
add_ref_bases_until(homChunks,homOffset,high)
return (''.join(homChunks),''.join(hetChunks)) | 316c19f964c6ce29d52358070d994f0fdfbcc1b8 | 8,785 |
import numpy as np
import scipy.interpolate
def interp_logpsd(data, rate, window, noverlap, freqs, interpolation='linear'):
"""Computes linear-frequency power spectral density, then uses interpolation
(linear by default) to estimate the psd at the desired frequencies."""
stft, linfreqs, times = specgram(data, window, Fs=rate, noverlap=noverlap, window = np.hamming(window))
ntimes = len(times)
logpsd = np.log10(np.abs(stft.T)**2)
interps = [scipy.interpolate.interp1d(linfreqs, logpsd[t,:], kind=interpolation) for t in range(ntimes)]
interped_logpsd = np.array([interps[t](freqs) for t in range(ntimes)])
return interped_logpsd, freqs, times | 0822f776063da9f0797aa898b0305fb295d8c0f1 | 8,786 |
def load_replica_camera_traj(traj_file_path):
"""
the format:
index
"""
camera_traj = []
traj_file_handle = open(traj_file_path, 'r')
for line in traj_file_handle:
split = line.split()
#if blank line, skip
if not len(split):
continue
camera_traj.append(split)
traj_file_handle.close()
return camera_traj | 1879c97ed5ce24834689b156ffdc971b023e67f2 | 8,787 |
def test_model(sess, graph, x_, y_):
"""
    Evaluate the model on the test data and return the average loss and accuracy.
    :param sess: TensorFlow session
    :param graph: graph containing the model operations
    :param x_: test inputs
    :param y_: test labels
    :return: average loss and average accuracy over the test set
"""
data_len = len(x_)
batch_eval = batch_iter(x_, y_, 64)
total_loss = 0.0
total_acc = 0.0
input_x = graph.get_operation_by_name('input_x').outputs[0]
input_y = graph.get_operation_by_name('input_y').outputs[0]
drop_prob = graph.get_operation_by_name('drop_prob').outputs[0]
loss = graph.get_operation_by_name('loss/loss').outputs[0]
acc = graph.get_operation_by_name('accuracy/acc').outputs[0]
for x_batch, y_batch in batch_eval:
batch_len = len(x_batch)
feed_dict = {input_x: x_batch, input_y: y_batch,
drop_prob: 0}
test_loss, test_acc = sess.run([loss, acc], feed_dict=feed_dict)
total_loss += test_loss * batch_len
total_acc += test_acc * batch_len
return total_loss / data_len, total_acc / data_len | 7c310a7cf979004d9f14fbd1ec57228dbfc81cd2 | 8,788 |
import numpy as np
def epanechnikov(h: np.ndarray, Xi: np.ndarray, x: np.ndarray) -> np.ndarray:
"""Epanechnikov kernel.
Parameters:
h : bandwidth.
Xi : 1-D ndarray, shape (nobs, 1). The value of the training set.
x : 1-D ndarray, shape (1, nbatch). The value at which the kernel density is being estimated.
Returns:
ndarray of shape ``(n_obs, nbatch)``: The kernel_value at each training point for each var.
"""
u = (Xi - x) / h
out = 3 / 4 * (1 - u**2) * (np.abs(u) <= 1)
assert out.shape == (Xi.shape[0], x.shape[1])
return out | 45902e9396661a6c0f8faf9cfc2d017125f6a427 | 8,789 |
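A small sketch of how a kernel like this is typically used in a kernel density estimate (illustrative; scalar bandwidth h, numpy imported as np as above):

def kde_sketch(train, query, h):
    # f_hat(x) = (1/h) * mean_i K((Xi - x) / h), averaging the kernel over the training points
    Xi = np.asarray(train, dtype=float).reshape(-1, 1)   # (nobs, 1)
    x = np.asarray(query, dtype=float).reshape(1, -1)    # (1, nbatch)
    return epanechnikov(h, Xi, x).mean(axis=0) / h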
def punctuation(chars=r',.\"!@#\$%\^&*(){}\[\]?/;\'`~:<>+=-'):
"""Finds characters in text. Useful to preprocess text. Do not forget
to escape special characters.
"""
return rf'[{chars}]' | b2fd23d8485c3b6d429723a02a95c981982559b5 | 8,790 |
import time
import logging
def log_http_request(f):
"""Decorator to enable logging on an HTTP request."""
level = get_log_level()
def new_f(*args, **kwargs):
request = args[1] # Second argument should be request.
object_type = 'Request'
object_id = time.time()
log_name = object_type + '.' + str(object_id)
setattr(request, 'LOG_ID', object_id)
logger = logging.getLogger(log_name)
logger.setLevel(level)
handler = LogModelHandler(object_type, object_id)
logger.addHandler(handler)
return f(*args, **kwargs)
    new_f.__name__ = f.__name__
return new_f | ecb62d0501307330fc0a56d8eadfbee8e729adf6 | 8,791 |
def look_at(vertices, eye, at=[0, 0, 0], up=[0, 1, 0]):
"""
"Look at" transformation of vertices.
"""
if (vertices.ndimension() != 3):
raise ValueError('vertices Tensor should have 3 dimensions')
place = vertices.place
# if list or tuple convert to numpy array
if isinstance(at, list) or isinstance(at, tuple):
at = paddle.to_tensor(at, dtype=paddle.float32, place=place)
# if numpy array convert to tensor
elif isinstance(at, np.ndarray):
at = paddle.to_tensor(at).to(place)
elif paddle.is_tensor(at):
at = at.to(place)
if isinstance(up, list) or isinstance(up, tuple):
up = paddle.to_tensor(up, dtype=paddle.float32, place=place)
elif isinstance(up, np.ndarray):
up = paddle.to_tensor(up).to(place)
elif paddle.is_tensor(up):
up = up.to(place)
if isinstance(eye, list) or isinstance(eye, tuple):
eye = paddle.to_tensor(eye, dtype=paddle.float32, place=place)
elif isinstance(eye, np.ndarray):
eye = paddle.to_tensor(eye).to(place)
elif paddle.is_tensor(eye):
eye = eye.to(place)
batch_size = vertices.shape[0]
if eye.ndimension() == 1:
eye = eye[None, :].tile([batch_size, 1])
if at.ndimension() == 1:
at = at[None, :].tile([batch_size, 1])
if up.ndimension() == 1:
up = up[None, :].tile([batch_size, 1])
# prevent paddle no grad error
at.stop_gradient = False
eye.stop_gradient = False
up.stop_gradient = False
# create new axes
# eps is chosen as 0.5 to match the chainer version
z_axis = F.normalize(at - eye, epsilon=1e-5)
x_axis = F.normalize(paddle.cross(up, z_axis), epsilon=1e-5)
y_axis = F.normalize(paddle.cross(z_axis, x_axis), epsilon=1e-5)
# create rotation matrix: [bs, 3, 3]
r = paddle.concat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), axis=1)
# apply
# [bs, nv, 3] -> [bs, nv, 3] -> [bs, nv, 3]
if vertices.shape != eye.shape:
eye = eye[:, None, :]
vertices = vertices - eye
vertices = paddle.matmul(vertices, r.swapaxes(1,2))
return vertices | 10a6b94ecba08fecd829758f9c94765c718a5add | 8,792 |
import types
def _count(expr, pat, flags=0):
"""
Count occurrences of pattern in each string of the sequence or scalar
:param expr: sequence or scalar
:param pat: valid regular expression
:param flags: re module flags, e.g. re.IGNORECASE
:return:
"""
return _string_op(expr, Count, output_type=types.int64,
_pat=pat, _flags=flags) | c4c387f18ac75977a661662dae7606a066242b57 | 8,793 |
import numpy as np
def simplex3_vertices():
"""
Returns the vertices of the standard 3-simplex. Each column is a vertex.
"""
v = np.array([
[1, 0, 0],
[-1/3, +np.sqrt(8)/3, 0],
[-1/3, -np.sqrt(2)/3, +np.sqrt(2/3)],
[-1/3, -np.sqrt(2)/3, -np.sqrt(2/3)],
])
return v.transpose() | b10c2c781d1f7ed7050e14f069efd3e0e9a80a2b | 8,794 |
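A quick numerical check of the geometry (illustrative; numpy imported as np as above): each column is a unit vector and every pair of distinct vertices has dot product -1/3.

v = simplex3_vertices()
g = v.T @ v  # Gram matrix of the four vertex columns
assert np.allclose(np.diag(g), 1.0)
assert np.allclose(g[~np.eye(4, dtype=bool)], -1.0 / 3.0)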
def get_output_msg(status, num_logs):
""" Returnes the output message in accordance to the script status """
if status == EXECUTION_STATE_COMPLETED:
return "Retrieved successfully {} logs that triggered the alert".format(num_logs)
else:
return "Failed to retrieve logs. Please check the script's logs to see what went wrong..." | caec8de737251cc7c386a85a098d73d19617e71a | 8,795 |
import re
def insert_channel_links(message: str) -> str:
"""
Takes a message and replaces all of the channel references with
links to those channels in Slack formatting.
:param message: The message to modify
:return: A modified copy of the message
"""
message_with_links = message
matches = re.findall(r'#[a-z0-9\-_(){}\[\]\'\"/]{1,22}', message)
for match in matches:
channel_name = match[1:]
channel = bot.channels.get(channel_name)
if channel is not None:
channel_link_string = f"<#{channel.id}|{channel.name}>"
message_with_links = message_with_links.replace(match, channel_link_string)
return message_with_links | ce56e81e8eb66dc0f2754141bcfc30f42db50c5a | 8,797 |
from uuid import UUID
def check_int_uuid(uuid):
    """Check that the given integer encodes a valid version-4 UUID."""
try:
converted = UUID(int=uuid, version=4)
except ValueError:
return False
return converted.int == uuid | a0ba7447e6c8cc0c35b68024fb4ade25f0802239 | 8,798 |
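Usage sketch (illustrative):

from uuid import uuid4
check_int_uuid(uuid4().int)  # True: a freshly generated version-4 UUID round-trips unchanged
check_int_uuid(12345)        # False: the version/variant bits get rewritten, so the integer changes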
def calc_E_E_C_hs_d_t_i(i, device, region, A_A, A_MR, A_OR, L_CS_d_t, L_CL_d_t):
"""暖冷房区画𝑖に設置された冷房設備機器の消費電力量(kWh/h)を計算する
Args:
i(int): 暖冷房区画の番号
device(dict): 暖冷房機器の仕様
region(int): 省エネルギー地域区分
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
L_CS_d_t(ndarray): 冷房区画の冷房顕熱負荷
L_CL_d_t(ndarray): 冷房区画の冷房潜熱負荷
Returns:
ndarray: 暖冷房区画𝑖に設置された冷房設備機器の消費電力量(kWh/h)
"""
    if device['type'] == 'ルームエアコンディショナー':  # room air conditioner
        # get the equipment specifications
A_HCZ_i = calc_A_HCZ_i(i, A_A, A_MR, A_OR)
q_rtd_C = rac_spec.get_q_rtd_C(A_HCZ_i)
e_rtd_C = rac_spec.get_e_rtd_C(device['e_class'], q_rtd_C)
        # calculate the electricity consumption
E_E_C_d_t_i = rac.calc_E_E_C_d_t(
region=region,
q_rtd_C=q_rtd_C,
e_rtd_C=e_rtd_C,
dualcompressor=device['dualcompressor'],
L_CS_d_t=L_CS_d_t[i - 1],
L_CL_d_t=L_CL_d_t[i - 1]
)
else:
raise ValueError(device['type'])
print('{} E_E_C_d_t_{} = {} [kWh] (L_H_d_t_{} = {} [MJ])'.format(device['type'], i, np.sum(E_E_C_d_t_i), i,
np.sum(L_CS_d_t + L_CL_d_t)))
return E_E_C_d_t_i | 8dbd0119ac90f3847de1f5af05891583a9bda26b | 8,799 |
def shift_time(x, dt):
"""Shift time axis to the left by dt. Used to account for pump & lamp delay"""
x -= dt
return x | c93fdddea8e41221583139dcc7a2d81177ba7c17 | 8,800 |
import datetime
import json
def eps_xfer(request,client_slug=None,show_slug=None):
"""
Returns all the episodes for a show as json.
    Used to sync public URLs with the main conference site.
"""
client=get_object_or_404(Client,slug=client_slug)
show=get_object_or_404(Show,client=client,slug=show_slug)
# eps = Episode.objects.filter(show=show)
eps=eps_filters(request.GET).filter(show=show).order_by('start')
if "id" in request.GET:
eps = eps.filter( id=request.GET['id'] )
fields=['id',
'state',
'location', 'location_slug',
'sequence',
'name', 'slug', 'authors', 'description',
'start', 'duration',
'released', 'license', 'tags',
'conf_key', 'conf_url',
'host_url', 'public_url', 'rax_mp4_url',
'archive_url', 'archive_mp4_url',
'twitter_url',
'comment',
]
if request.user.is_authenticated():
fields.extend(['emails', 'edit_key',])
if "fields" in request.GET:
fields_whitelist = request.GET['fields'].split(',')
print(fields_whitelist)
fields = [f for f in fields if f in fields_whitelist]
"""
serializers.serialize("json", eps,
fields=fields, use_natural_foreign_keys=True,
stream=response)
"""
gold_list = ['location', 'location_slug']
ds=[]
for ep in eps:
d = {}
for f in fields:
if f == 'location':
d[f] = ep.location.name
elif f == 'location_slug':
                d[f] = ep.location.slug
else:
d[f]=getattr(ep,f)
# archive_mp4_url is really the url of the page
# make a mp4 url too
# the mp4 link is now:
# https://archive.org/download/pyohio_2019-Changing_Lives_through_Open_Source_Passion_and_Mentoring/Changing_Lives_through_Open_Source_Passion_and_Mentoring.mp4
if 'archive_mp4_url' in d:
d['archive_url'] = d['archive_mp4_url']
d['archive_mp4_url'] = ""
if 'start' in d:
d['start_at'] = (d['start']
- datetime.timedelta(minutes=5)
).strftime('%H:%M %d.%m.%Y')
ds.append(d)
response = HttpResponse(content_type="application/json")
json.dump( ds, response, cls=serializers.json.DjangoJSONEncoder )
return response | 9a6691e0ac750919b5915e45ace0c347aa83cbe3 | 8,801 |
def register(class_, option=None, get_funcs={}):
"""A decorator to register a function as the way to display an object of class_
"""
if option:
key = (class_, option)
else:
key = class_
def decorator(func):
class_function_mapping[key] = (func, get_funcs)
return func
return decorator | c060691dd9e2760905e29a2c643dfa63d4ed029c | 8,802 |
def startup(target: machine.Machine,
workload: str,
count: int = 5,
port: int = 0,
**kwargs):
"""Time the startup of some workload.
Args:
target: A machine object.
workload: The workload to run.
count: Number of containers to start.
port: The port to check for liveness, if provided.
**kwargs: Additional container options.
Returns:
The mean start-up time in seconds.
"""
# Load before timing.
image = target.pull(workload)
netcat = target.pull("netcat")
count = int(count)
port = int(port)
with helpers.Timer() as timer:
for _ in range(count):
if not port:
# Run the container synchronously.
target.container(image, **kwargs).run()
else:
# Run a detached container until httpd available.
with target.container(image, port=port, **kwargs).detach() as server:
(server_host, server_port) = server.address()
target.container(netcat).run(host=server_host, port=server_port)
return timer.elapsed() / float(count) | c53b627d95270aa074f9178e1ebcd6ea49b8eeaa | 8,803 |
def log2_fold_change(df, samp_grps):
"""
calculate fold change - fixed as samp_grps.mean_names[0] over samp_grps.mean_names[1],
where the mean names are sorted alphabetically. The log has already been taken,
so the L2FC is calculated as mean0 - mean1
:param df: expanded and/or filtered dataframe
:param samp_grps: SampleGroups() object
:return: dataframe with fold change column appended, with name as in samp_grps.fc_name
"""
mean1 = samp_grps.mean_names[0]
mean2 = samp_grps.mean_names[1]
df[samp_grps.fc_name] = df[mean1] - df[mean2]
return df | 07fcef6f5143095f4f8f77d0251bbd7ecd486fd9 | 8,805 |
def infer_wheel_units(pos):
"""
Given an array of wheel positions, infer the rotary encoder resolution, encoding type and units
The encoding type varies across hardware (Bpod uses X1 while FPGA usually extracted as X4), and
older data were extracted in linear cm rather than radians.
:param pos: a 1D array of extracted wheel positions
:return units: the position units, assumed to be either 'rad' or 'cm'
:return resolution: the number of decoded fronts per 360 degree rotation
:return encoding: one of {'X1', 'X2', 'X4'}
"""
if len(pos.shape) > 1: # Ensure 1D array of positions
pos = pos.flatten()
# Check the values and units of wheel position
res = np.array([wh.ENC_RES, wh.ENC_RES / 2, wh.ENC_RES / 4])
# min change in rad and cm for each decoding type
# [rad_X4, rad_X2, rad_X1, cm_X4, cm_X2, cm_X1]
min_change = np.concatenate([2 * np.pi / res, wh.WHEEL_DIAMETER * np.pi / res])
pos_diff = np.median(np.abs(np.ediff1d(pos)))
# find min change closest to min pos_diff
idx = np.argmin(np.abs(min_change - pos_diff))
if idx < len(res):
# Assume values are in radians
units = 'rad'
encoding = idx
else:
units = 'cm'
encoding = idx - len(res)
enc_names = {0: 'X4', 1: 'X2', 2: 'X1'}
return units, int(res[encoding]), enc_names[int(encoding)] | 82d1a63c11c31d4de83ba5360def223b85194ef9 | 8,806 |
def extract_tform(landmarks, plane_name):
"""Compute the transformation that maps the reference xy-plane at origin to the GT standard plane.
Args:
landmarks: [landmark_count, 3] where landmark_count=16
plane_name: 'tv' or 'tc'
Returns:
trans_vec: translation vector [3]
quat: quaternions [4]
mat: 4x4 transformation matrix [4, 4]
"""
if plane_name == 'tv':
# Landmarks lying on the TV plane
landmarks_plane = np.vstack((landmarks[1:8], landmarks[12:14]))
# Compute transformation
z_vec, p_plane = fit_plane(landmarks_plane)
landmarks_plane_proj = project_on_plane(landmarks_plane, z_vec, p_plane)
landmarks_line = landmarks_plane_proj[[0, 1, 2, 7, 8], :]
x_vec, p_line = fit_line(landmarks_line)
y_vec = geometry.unit_vector(np.cross(z_vec, x_vec))
# 4x4 transformation matrix
mat = np.eye(4)
mat[:3, :3] = np.vstack((x_vec, y_vec, z_vec)).transpose()
mat[:3, 3] = landmarks_plane_proj[0]
# Quaternions and translation vector
quat = geometry.quaternion_from_matrix(mat[:3, :3])
trans_vec = mat[:3, 3]
elif plane_name == 'tc':
# Landmarks lying on the TC plane
cr = landmarks[10]
cl = landmarks[11]
csp = landmarks[12]
# Compute transformation
csp_cl = cl - csp
csp_cr = cr - csp
z_vec = np.cross(csp_cl, csp_cr)
z_vec = geometry.unit_vector(z_vec)
cr_cl_mid = (cr + cl) / 2.0
x_vec = geometry.unit_vector(cr_cl_mid - csp)
y_vec = geometry.unit_vector(np.cross(z_vec, x_vec))
# 4x4 transformation matrix
mat = np.eye(4)
mat[:3, :3] = np.vstack((x_vec, y_vec, z_vec)).transpose()
mat[:3, 3] = (cr_cl_mid + csp) / 2.0
# Quaternions and translation vector
quat = geometry.quaternion_from_matrix(mat[:3, :3])
trans_vec = mat[:3, 3]
else:
raise ValueError('Invalid plane name.')
return trans_vec, quat, mat | d9d4ed43c9572cdd76b34235e380f22a6eb27d03 | 8,807 |
from typing import TextIO
import csv
def load_events(fhandle: TextIO) -> annotations.Events:
"""Load an URBAN-SED sound events annotation file
Args:
fhandle (str or file-like): File-like object or path to the sound events annotation file
Raises:
IOError: if txt_path doesn't exist
Returns:
Events: sound events annotation data
"""
times = []
labels = []
confidence = []
reader = csv.reader(fhandle, delimiter="\t")
for line in reader:
times.append([float(line[0]), float(line[1])])
labels.append(line[2])
confidence.append(1.0)
events_data = annotations.Events(
np.array(times), "seconds", labels, "open", np.array(confidence)
)
return events_data | 2c2017d754fe12ebd37349b359ba6a92ec115421 | 8,808 |
def set_nan(df, chrom_bed_file):
"""This function will take in a dataframe and chromosome length bed file
and will replace 0's with np.nan according to each chromosome length.
This will fix any issues when calculating Z-scores"""
# Build dictionary of key=chromosome and value=chromosome_length
chrom_length_dict = {}
for v in chrom_bed_file.itertuples():
chrom_length_dict[v[1]] = v[2]
continue
# Iterate through each column
for chrom in df.columns.to_list():
current_chrom_length = chrom_length_dict[str(chrom)]
# Iterate through each value of a column in reverse
for index, value in zip(
reversed(df.index.to_list()),
reversed(df[chrom].to_list())
):
# Check if index is greater than length of chromosome
if index > current_chrom_length:
df.at[index, chrom] = np.nan
else:
break
return df | e90008c42db5a94c8676c941da5832438301a724 | 8,810 |
def configure_smoothing(new_d,smoothing_scans):
"""
# <batchstep method="net.sf.mzmine.modules.peaklistmethods.peakpicking.smoothing.SmoothingModule">
# <parameter name="Peak lists" type="BATCH_LAST_PEAKLISTS"/>
# <parameter name="Filename suffix">smoothed</parameter>
# <parameter name="Filter width">9</parameter>
# <parameter name="Remove original peak list">false</parameter>
# </batchstep>
"""
idx = [i for i,d in enumerate(new_d['batch']['batchstep']) if 'SmoothingModule' in d['@method']][0]
idx2 = [i for i,d in enumerate(new_d['batch']['batchstep'][idx]['parameter']) if 'Filter width' in d['@name']][0]
new_d['batch']['batchstep'][idx]['parameter'][idx2]['#text'] = '%.3f'%(smoothing_scans)
return new_d | 031586cf5dbb9fdf1fb6762a89a988367d172942 | 8,811 |
def contact_us():
""" Contact Us Route
Route to lead to the contact page
Args:
None
Returns:
rendered template for contact_us.html
"""
return render_template('contact_us.html', title='CONP | Contact Us', user=current_user) | 2597038074e8f60e14066f10390a161b15cf7071 | 8,812 |
import pandas
import psycopg2
def query_field(boresight, r1=None, r2=None, observatory='apo',
mag_range=None, mag_column=None, database_params=None):
"""Selects Gaia DR2 stars for a field, from the database.
Parameters
----------
boresight : tuple
A tuple with the right ascension and declination of the boresight,
in degrees.
r1,r2 : float
The internal and external radii along which the GFAs are located, in
degrees.
observatory : str
The observatory, used to load the default configuration for the GFAs.
mag_range : tuple
The range of magnitudes used to select stars.
mag_column : str
The name of the magnitude column to query.
database_params : dict
A dictionary of database parameters to create the connection. Can
include ``user``, ``host``, ``port``, and ``dbname``.
Returns
-------
`~pandas.Dataframe`
A dataframe with the selected stars.
"""
obs_data = config[observatory]
r1 = r1 or obs_data['r1']
r2 = r2 or obs_data['r2']
mag_range = mag_range or config['mag_range']
mag_column = mag_column or config['mag_column']
query = ('WITH x AS MATERIALIZED (SELECT source_id, ra, dec, '
'{mag_column}, pmra, pmdec '
'FROM gaia_dr2_source WHERE '
'q3c_radial_query(ra, dec, {ra}, {dec}, {r2}) AND '
'NOT q3c_radial_query(ra, dec, {ra}, {dec}, {r1})) '
'SELECT * FROM x WHERE {mag_column} > {g_min} AND '
'{mag_column} < {g_max};')
query = query.format(ra=boresight[0], dec=boresight[1], r1=r1, r2=r2,
g_min=mag_range[0], g_max=mag_range[1],
mag_column=mag_column)
if database_params is None:
database_params = config['database']
conn_str = ''
for key in database_params:
conn_str += f'{key}={database_params[key]} '
connection = psycopg2.connect(conn_str)
data = pandas.read_sql(query, connection)
connection.close()
return data | c05276ecfac3b33dcc5382cf54e220b416614656 | 8,814 |
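# Usage sketch for query_field above (hypothetical database parameters; also assumes
# the package's `config` provides defaults for the chosen observatory, and that the
# database exposes a q3c-indexed gaia_dr2_source table):
stars = query_field(
    (150.0, 2.2),
    observatory='apo',
    mag_range=(10, 15),
    mag_column='phot_g_mean_mag',
    database_params={'user': 'postgres', 'host': 'localhost',
                     'port': 5432, 'dbname': 'catalogdb'},
)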
def get_throttling_equilibria(simulation_config, input_params, priority_queue=True, dev_team_factor=1.0):
"""
    Returns the equilibrium profiles for the throttling configurations under analysis.
    :param simulation_config: base simulation configuration the throttling scenarios are derived from.
    :param input_params: input parameters forwarded to the equilibrium-profile loader.
    :return: list of scenario dicts with description, simulation configuration, and equilibrium profiles.
"""
desc_inf003 = "THROTTLING_INF003"
process_configuration_inf003 = dict(simulation_config)
process_configuration_inf003["THROTTLING_ENABLED"] = True
process_configuration_inf003["GATEKEEPER_CONFIG"] = None
process_configuration_inf003["INFLATION_FACTOR"] = 0.03
process_configuration_inf003["SUCCESS_RATE"] = 0.95
if priority_queue and dev_team_factor == 0.5:
filename_inf003 = "INF3.0_PRIQUEUE_True_DEVFACTOR_0.5_equilibrium_results.csv"
filename_inf010 = "INF10.0_PRIQUEUE_True_DEVFACTOR_0.5_equilibrium_results.csv"
filename_inf020 = "INF20.0_PRIQUEUE_True_DEVFACTOR_0.5_equilibrium_results.csv"
elif priority_queue and dev_team_factor == 1.0:
filename_inf003 = "INF3.0_PRIQUEUE_True_DEVFACTOR_1.0_equilibrium_results.csv"
filename_inf010 = "INF10.0_PRIQUEUE_True_DEVFACTOR_1.0_equilibrium_results.csv"
filename_inf020 = "INF20.0_PRIQUEUE_True_DEVFACTOR_1.0_equilibrium_results.csv"
elif not priority_queue and dev_team_factor == 0.5:
filename_inf003 = "INF3.0_PRIQUEUE_False_DEVFACTOR_0.5_equilibrium_results.csv"
filename_inf010 = "INF10.0_PRIQUEUE_False_DEVFACTOR_0.5_equilibrium_results.csv"
filename_inf020 = "INF20.0_PRIQUEUE_False_DEVFACTOR_0.5_equilibrium_results.csv"
elif not priority_queue and dev_team_factor == 1.0:
filename_inf003 = "INF3.0_PRIQUEUE_False_DEVFACTOR_1.0_equilibrium_results.csv"
filename_inf010 = "INF10.0_PRIQUEUE_False_DEVFACTOR_1.0_equilibrium_results.csv"
filename_inf020 = "INF20.0_PRIQUEUE_False_DEVFACTOR_1.0_equilibrium_results.csv"
equilibrium_profiles_inf003 = get_profiles_from_file("csv/" + filename_inf003, scenario_desc=desc_inf003,
input_params=input_params)
desc_inf010 = "THROTTLING_INF010"
process_configuration_inf010 = dict(process_configuration_inf003)
process_configuration_inf010["INFLATION_FACTOR"] = 0.10
equilibrium_profiles_inf010 = get_profiles_from_file("csv/" + filename_inf010, scenario_desc=desc_inf010,
input_params=input_params)
desc_inf020 = "THROTTLING_INF020"
process_configuration_inf020 = dict(process_configuration_inf003)
process_configuration_inf020["INFLATION_FACTOR"] = 0.20
equilibrium_profiles_inf020 = get_profiles_from_file("csv/" + filename_inf020, scenario_desc=desc_inf020,
input_params=input_params)
return [{"desc": desc_inf003,
"simulation_configuration": process_configuration_inf003,
"equilibrium_profiles": equilibrium_profiles_inf003},
{"desc": desc_inf010,
"simulation_configuration": process_configuration_inf010,
"equilibrium_profiles": equilibrium_profiles_inf010},
{"desc": desc_inf020,
"simulation_configuration": process_configuration_inf020,
"equilibrium_profiles": equilibrium_profiles_inf020}] | 4e0f6dd8fa3b0b36b713b33ab1a5aaf8394d4942 | 8,815 |
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__) | 522ae88538d6dd880492292c6f2ef169f3bbd06d | 8,816 |
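# Usage sketch for signature above: inspect a functools.partial object.
from functools import partial

p = partial(int, base=2)
func, args, keywords, state = signature(p)
# func is int, args == (), keywords == {'base': 2}, state is the partial's __dict__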
def clean_key(func):
"""Provides a clean, readable key from the funct name and module path.
"""
module = func.__module__.replace("formfactoryapp.", "")
return "%s.%s" % (module, func.__name__) | 946288cd231148eb39af5d1e7e0b957d9f2131e8 | 8,817 |
import numpy as np

def rotY(M, alpha):
"""Rotates polygon M around Y axis by alpha degrees.
M needs to be a Numpy Array with shape (4,N) with N>=1"""
T = np.eye(4)
alpha_radians = np.radians(alpha)
sin = np.sin(alpha_radians)
cos = np.cos(alpha_radians)
T[0,0] = cos
T[2,2] = cos
T[0,2] = sin
T[2,0] = -sin
return np.dot(T,M) | 49e850ff66b3c7877e6d8b4a450baaa6707d4f15 | 8,818 |
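# Usage sketch for rotY above: rotate one homogeneous point 90 degrees about the Y axis.
point = np.array([[1.0], [0.0], [0.0], [1.0]])  # shape (4, 1)
rotated = rotY(point, 90)
# rotated is approximately [[0], [0], [-1], [1]]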
# Assumed typical extension set; the original module defines IMG_EXTENSIONS elsewhere.
IMG_EXTENSIONS = ['.png', '.jpg', '.jpeg', '.bmp', '.tif', '.tiff']

def is_image_file(filename):
    """
    :param filename: name of the file to check
    :return: True if the filename ends with a known image extension
    """
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) | 40125478c6440efc9a740d2df57ba2f7bb15a5d1 | 8,819 |
def invert_comp_specifier(comp_specifier):
""" return the opposite (logical negation) of @p comp_specifier """
inverse_map = {
Comparison.Equal: Comparison.NotEqual,
Comparison.Less: Comparison.GreaterOrEqual,
Comparison.LessOrEqual: Comparison.Greater,
Comparison.NotEqual: Comparison.Equal,
Comparison.Greater: Comparison.LessOrEqual,
Comparison.GreaterOrEqual: Comparison.Less,
}
return inverse_map[comp_specifier] | 187392dd1dc7f52c744536e8e372cab752ff8c85 | 8,820 |
import utm
def latlong2utm(point):
"""
This function converts a point from lat long to utm
Input : point : (lat,long)
Output : utm point : (x,y,z, n)
"""
return utm.from_latlon(point[0],point[1]) | 3ee82f9df84b02aa35fa0f2a35ec0916edf30e42 | 8,821 |
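# Usage sketch for latlong2utm above: utm.from_latlon returns
# (easting, northing, zone number, zone letter).
easting, northing, zone_number, zone_letter = latlong2utm((51.2, 7.5))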
def multiply(a,b):
"""
    Multiply two values.
    Args:
        a (float/int): any numeric value
        b (float/int): any numeric value
    Returns:
        float/int: the product of a and b
"""
return a*b | 67a85b1675da48684e9de7e9834d3daa4357699b | 8,822 |
from typing import Tuple
from typing import Dict
from typing import List
import regex
def merge_vocab(pair: Tuple[str, str], input_vocab: Dict[str, int]) -> Tuple[Dict[str, int], List]:
"""
>>> pair = ('w', 'o')
>>> input_vocab = {'b i r d @': 3, 'w o r d @': 7, 'w o g @': 13}
>>> new_vocab, new_pairs = merge_vocab(pair, input_vocab)
>>> new_vocab
{'b i r d @': 3, 'wo r d @': 7, 'wo g @': 13}
>>> new_pairs
[(('wo', 'r'), 7), (('o', 'r'), -7), (('wo', 'g'), 13), (('o', 'g'), -13)]
"""
output_vocab = {}
concat_pair_with_space = ' '.join(pair)
concat_pair_with_space_escaped = regex.escape(concat_pair_with_space)
concat_pair = ''.join(pair)
reg = regex.compile('(^|[^ ]+ )(' + concat_pair_with_space_escaped + ')( [^ ]+|$)')
added_pairs = []
for word in input_vocab:
word_occurences = input_vocab[word]
match = reg.search(word)
while match:
# word changed
if match.group(1) != '':
subtoken_before = match.group(1)[:-1]
added_pairs.append(((subtoken_before, concat_pair), word_occurences))
if pair != (subtoken_before, pair[0]):
added_pairs.append(((subtoken_before, pair[0]), -word_occurences))
if match.group(3) != '':
subtoken_after = match.group(3)[1:]
added_pairs.append(((concat_pair, subtoken_after), word_occurences))
if pair != (pair[1], subtoken_after):
added_pairs.append(((pair[1], subtoken_after), -word_occurences))
start, end = match.span(2)
replacement = concat_pair
word = word[:start] + replacement + word[end:]
match = reg.search(word)
output_vocab[word] = word_occurences
return output_vocab, added_pairs | 15226aa9ebd9cae73e5bd00b60cb1b3bbb5d8e07 | 8,823 |
import cv2
import mmcv
import numpy as np
# `random_color` is assumed to be the hosting package's label-to-color helper.

def visualize_bbox_act(img, bboxes, labels, act_preds,
                       classes=None, thickness=1,
                       font_scale=0.4, show=False,
                       wait_time=0, out_file=None):
"""Show the tracks with opencv."""
assert bboxes.ndim == 2
assert labels.ndim == 1
assert bboxes.shape[0] == labels.shape[0]
assert bboxes.shape[1] == 5
if isinstance(img, str):
img = mmcv.imread(img)
img_shape = img.shape
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
text_width, text_height = 8, 15
for i, (bbox, label) in enumerate(zip(bboxes, labels), 0):
x1, y1, x2, y2 = bbox[:4].astype(np.int32)
score = float(bbox[-1])
# bbox
bbox_color = random_color(label)
bbox_color = [int(255 * _c) for _c in bbox_color][::-1]
cv2.rectangle(img, (x1, y1), (x2, y2), bbox_color, thickness=thickness)
# score
text = '{:.02f}'.format(score)
width = len(text) * text_width
img[y1 - text_height:y1, x1:x1 + width, :] = bbox_color
cv2.putText(
img,
text, (x1, y1 - 2),
cv2.FONT_HERSHEY_COMPLEX,
font_scale,
color=(0, 0, 0))
classes_color = random_color(label + 1)
text = classes[label]
width = len(text) * text_width
img[y1:y1 + text_height, x1:x1 + width, :] = bbox_color
cv2.putText(img,text,
(x1, y1 + text_height - 2),
cv2.FONT_HERSHEY_COMPLEX,
font_scale,color=classes_color)
#background_color = random_color(label + 5)
background_color = [255, 204, 153]
if (act_preds is not None) and (len(bboxes)==len(labels)==len(act_preds)):
for j, act_pred in enumerate(act_preds[i]):
text = '{}: {:.02f}'.format(act_pred[0], act_pred[1])
width = len(text) * (text_width)
img[y1+text_height*(j+2) :y1 + text_height*(j+3), x1:x1 + width, :] = background_color
cv2.putText(img, text,
(x1, y1 + text_height*(j+3) - 2),
cv2.FONT_HERSHEY_COMPLEX,
font_scale, color=classes_color)
if show:
mmcv.imshow(img, wait_time=wait_time)
if out_file is not None:
mmcv.imwrite(img, out_file)
return img | de67d5acba2b2994ec2b66ae4e7e0c58498ecebe | 8,824 |
def calculate_similarity(subgraph_degrees):
"""
Given a list of subgraph degrees, this function calls the guidance
function and calculates the similarity of a particular node with all it's
non-connected nodes.
:param subgraph_degrees: A list of lists containing the non connected node
and degrees of common neighbours from the subgraph.
:return: A dictionary of similarity of each non-connected node
"""
similarity_dict = []
for nc_node in subgraph_degrees:
similarity = 0
for common_node in nc_node[1]:
# Getting the degree of the common neighbour node from the original
# graph
original_degree = graph.degrees.filter("id = '{}'".format(
common_node.id)).select("degree").collect()
# Getting the degree of the common neighbour node from the subgraph
sub_degree = common_node.degree
# Calling the function to calculate guidance for the common
# neighbour node
guidance = get_guidance(sub_degree, original_degree[0].degree)
# Adding the guidance to the similarity of the non-connected node
similarity += guidance
similarity_dict.append((nc_node[0], similarity))
return similarity_dict | cd4be7c405b2974f35db24dbd7d7db7bdf9a867e | 8,825 |
def balance_thetas(theta_sets_types, theta_sets_values):
"""Repeats theta values such that all thetas lists have the same length """
n_sets = max([len(thetas) for thetas in theta_sets_types])
for i, (types, values) in enumerate(zip(theta_sets_types, theta_sets_values)):
assert len(types) == len(values)
n_sets_before = len(types)
if n_sets_before != n_sets:
theta_sets_types[i] = [types[j % n_sets_before] for j in range(n_sets)]
theta_sets_values[i] = [values[j % n_sets_before] for j in range(n_sets)]
return theta_sets_types, theta_sets_values | 3ca7316a18d57c95adbfbdfec5f5be36f33dc0ea | 8,826 |
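# Usage sketch for balance_thetas above: the shorter list is repeated until all
# benchmarks define the same number of theta sets.
types = [["sm", "morphing"], ["sm"]]
values = [[[0.0], [1.0]], [[0.0]]]
types, values = balance_thetas(types, values)
# types  -> [['sm', 'morphing'], ['sm', 'sm']]
# values -> [[[0.0], [1.0]], [[0.0], [0.0]]]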
import pandas as pd

def _format_weights(df, col, targets, regs):
"""
Reformat the edge table (target -> regulator) that's output by amusr into a pivoted table that the rest of the
inferelator workflow can handle
:param df: pd.DataFrame
An edge table (regulator -> target) with columns containing model values
:param col:
Which column to pivot into values
:param targets: list
A list of target genes (the index of the output data)
:param regs: list
A list of regulators (the columns of the output data)
:return out: pd.DataFrame [G x K]
A [targets x regulators] dataframe pivoted from the edge dataframe
"""
# Make sure that the value column is all numeric
df[col] = pd.to_numeric(df[col])
# Pivot an edge table into a matrix of values
out = pd.pivot_table(df, index='target', columns='regulator', values=col, fill_value=0.)
# Reindex to a [targets x regulators] dataframe and fill anything missing with 0s
out = out.reindex(targets).reindex(regs, axis=1)
out = out.fillna(value=0.)
return out | b683846d9d059a39280077a714455718bd710670 | 8,827 |
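# Usage sketch for _format_weights above (hypothetical edge table): pivot per-edge
# model weights into a [targets x regulators] matrix.
edges = pd.DataFrame({
    "regulator": ["TF1", "TF2", "TF1"],
    "target": ["geneA", "geneA", "geneB"],
    "weight": [0.5, -0.2, 0.9],
})
weights = _format_weights(edges, "weight",
                          targets=["geneA", "geneB", "geneC"],
                          regs=["TF1", "TF2"])
# weights is a 3x2 frame; geneC and any missing (target, regulator) pairs are 0.0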
from botocore.exceptions import ClientError
from fastapi import HTTPException
# `table` (DynamoDB table resource), `thread_input`, and `ReqThreadPut` are assumed
# to be defined in the hosting module.

def put_thread(req_thread: ReqThreadPut):
"""Put thread for video to DynamoDB"""
try:
input = thread_input.update_item(req_thread)
res = table.update_item(**input)
return res
except ClientError as err:
err_message = err.response["Error"]["Message"]
raise HTTPException(status_code=404, detail=err_message)
except BaseException as err:
raise HTTPException(status_code=404, detail=str(err)) | dba9fe080451a3cb68365824faf8dbccad03b1b6 | 8,828 |
def check_method(adata):
"""Check that method output fits expected API."""
assert "labels_pred" in adata.obs
return True | 78c1a5181395f1675854333c30bf617c578cc1d4 | 8,830 |
import torch.nn as nn
from mmcv.cnn import ConvModule

def build_index_block(in_channels,
out_channels,
kernel_size,
stride=2,
padding=0,
groups=1,
norm_cfg=dict(type='BN'),
use_nonlinear=False,
expansion=1):
"""Build an conv block for IndexBlock.
Args:
in_channels (int): The input channels of the block.
out_channels (int): The output channels of the block.
kernel_size (int): The kernel size of the block.
stride (int, optional): The stride of the block. Defaults to 2.
padding (int, optional): The padding of the block. Defaults to 0.
groups (int, optional): The groups of the block. Defaults to 1.
norm_cfg (dict, optional): The norm config of the block.
Defaults to dict(type='BN').
        use_nonlinear (bool, optional): Whether to use a nonlinearity in the block.
            If true, a ConvModule with kernel size 1 will be appended and a
            ``ReLU6`` nonlinearity will be added to the original ConvModule.
            Defaults to False.
        expansion (int, optional): Expansion ratio of the middle channels.
            Effective when ``use_nonlinear`` is true. Defaults to 1.
Returns:
nn.Module: The built conv block.
"""
if use_nonlinear:
return nn.Sequential(
ConvModule(
in_channels,
in_channels * expansion,
kernel_size,
stride=stride,
padding=padding,
groups=groups,
norm_cfg=norm_cfg,
act_cfg=dict(type='ReLU6')),
ConvModule(
in_channels * expansion,
out_channels,
1,
stride=1,
padding=0,
groups=groups,
bias=False,
norm_cfg=None,
act_cfg=None))
else:
return ConvModule(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
groups=groups,
bias=False,
norm_cfg=None,
act_cfg=None) | 03e15760146ce75f06de64ffd6886fe627afcf9b | 8,831 |
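# Usage sketch for build_index_block above: a nonlinear 4x4 index block, as used in
# IndexNet-style upsampling (the parameter values here are illustrative).
block = build_index_block(32, 32, kernel_size=4, stride=2, padding=1,
                          use_nonlinear=True, expansion=2)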
def nodes(*paths, type=None):
"""Call node() on each given path and return the list of results.
nodes('foo', 'bar', ...) is equivalent to
[node('foo'), node('bar'), ...]
"""
return list(map(lambda p: node(p, type=type), paths)) | d1ae50237a275c70b9b9e85684e898494fc6c954 | 8,832 |
def parse_rule(parameter_string):
"""Parse a parameter string into its constituent name, type, and
pattern
For example:
`parse_parameter_string('<param_one:[A-z]>')` ->
('param_one', str, '[A-z]')
:param parameter_string: String to parse
:return: tuple containing
(parameter_name, parameter_type, parameter_pattern)
"""
# We could receive NAME or NAME:PATTERN
if str(parameter_string).startswith('/'):
parameter_string = parameter_string[1:]
parameter_string = str(parameter_string).strip('<>')
name = parameter_string
pattern = 'string'
if ':' in parameter_string:
name, pattern = parameter_string.split(':', 1)
default = (str, pattern)
# Pull from pre-configured types
_type, pattern = REGEX_TYPES.get(pattern, default)
return name, _type, pattern | 881e219ab59c801da078e91cf82ccb15caa7798d | 8,834 |
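# Usage sketch for parse_rule above. REGEX_TYPES is defined in the original module;
# the minimal mapping below is an assumed stand-in for illustration.
REGEX_TYPES = {
    'string': (str, r'[^/]+'),
    'int': (int, r'\d+'),
    'alpha': (str, r'[A-Za-z]+'),
}
name, _type, pattern = parse_rule('<user_id:int>')
# name == 'user_id', _type is int, pattern == r'\d+'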
from flask import render_template

def plan():
    """
    Improvement plan page.
    :return: rendered template for plan.htm
    """
return render_template('plan.htm') | 135d8b003adbe8f6311f781f0d4ff7ed206a81d6 | 8,835 |
def extract_traceback(notebook):
""" Extracts information about an error from the notebook.
Parameters
----------
notebook: :class:`nbformat.notebooknode.NotebookNode`
Executed notebook to find an error traceback.
Returns
-------
bool
Whether the executed notebook has an error traceback.
int or None
Number of a cell with a traceback.
If None, then the notebook doesn't contain an error traceback.
str
Error traceback if exists.
"""
for cell in notebook['cells']:
# Find a cell output with a traceback and extract the traceback
outputs = cell.get('outputs', [])
for output in outputs:
traceback = output.get('traceback', [])
if traceback:
traceback = '\n'.join(traceback)
return True, cell['execution_count'], traceback
return False, None, "" | 9af26f973e6810936eaa68058efcdb7bc145803b | 8,836 |
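# Usage sketch for extract_traceback above ("executed.ipynb" is a hypothetical path
# to an already-executed notebook):
import nbformat

nb = nbformat.read("executed.ipynb", as_version=4)
failed, cell_num, tb = extract_traceback(nb)
if failed:
    print(f"Cell {cell_num} raised an error:\n{tb}")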
def get_log() -> str:
"""get_log() -> str
(internal)
"""
return str() | 3e2d7bf82128afc664eded15e6c11f1ed9da45e7 | 8,837 |
def start_server(self, parameters): # pragma: no cover
"""adds the server start to celery's queue
Args:
parameters(dict): The POST JSON parameters
"""
self.update_state(state=CeleryStates.started)
session = ServerSession(parameters)
return session() | c20e2233ee7c1e6b1718b0c4bfbb2b9b5f52e0e1 | 8,838 |
def generate_config(context):
""" Entry point for the deployment resources. """
properties = context.properties
name = properties.get('name', context.env['name'])
project_id = properties.get('project', context.env['project'])
bgp = properties.get('bgp', {'asn': properties.get('asn')})
router = {
'name': context.env['name'],
# https://cloud.google.com/compute/docs/reference/rest/v1/routers
'type': 'gcp-types/compute-v1:routers',
'properties':
{
'name':
name,
'project':
project_id,
'region':
properties['region'],
'bgp': bgp,
'network':
properties.get('networkURL', generate_network_uri(
project_id,
properties.get('network', ''))),
}
}
optional_properties = [
'description',
'bgpPeers',
'interfaces',
'nats',
]
for prop in optional_properties:
append_optional_property(router, properties, prop)
return {
'resources': [router],
'outputs':
[
{
'name': 'name',
'value': name
},
{
'name': 'selfLink',
'value': '$(ref.' + context.env['name'] + '.selfLink)'
},
{
'name':
'creationTimestamp',
'value':
'$(ref.' + context.env['name'] + '.creationTimestamp)'
}
]
} | 506c7ded703b8c00fb9a2a6d7645e9e5d0da6905 | 8,839 |
import time
def cachedmethod(timeout):
"""
Function decorator to enable caching for instance methods.
"""
def _cached(func):
if not(hasattr(func, 'expires')):
func.expires = {}
func.cache = {}
def __cached(self, *args, **kwargs):
if(timeout and func.expires.get(repr(self), 0) < time.time()):
if(repr(self) in func.cache):
del func.cache[repr(self)]
if(repr(self) in func.cache):
return func.cache[repr(self)]
result = func(self, *args, **kwargs)
if(result):
func.cache[repr(self)] = result
func.expires[repr(self)] = time.time() + timeout
return result
return __cached
try:
# see if it's an int
int(timeout)
except TypeError:
func = timeout
timeout = 0
return _cached(func)
return _cached | dd8999a60aa6d92e6b442c7c0661d88cd0e8590e | 8,840 |
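# Usage sketch for cachedmethod above: cache an instance method's result for one second.
class Clock:
    @cachedmethod(1)
    def now(self):
        return time.time()

c = Clock()
first = c.now()
assert c.now() == first   # served from the cache within the timeout
time.sleep(1.1)
assert c.now() > first    # cache expired, value recomputed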
def __build_pyramid(models, features):
"""Applies all submodels to each FPN level.
Args:
models (list): List of submodels to run on each pyramid level
(by default only regression, classifcation).
features (list): The FPN features.
Returns:
list: A list of tensors, one for each submodel.
"""
return [__build_model_pyramid(n, m, features) for n, m in models] | 269be978f9aafbdc36b1c9d726171785a85f54a4 | 8,841 |
def get_ap_list():
"""
Method to return list of aps present in the network
"""
return jsonify_params(
CELLULAR_NETWORK.ap_list
) | da0777219025499603425f3147b2897d2bce2da6 | 8,842 |
def _merge_url_rule(rule_before, rule_after):
"""
Merges two url rule parts.
Parameters
----------
rule_before : `None` or `tuple` of `tuple` (`int`, `str`)
First url part if any to join `rule_after` to.
rule_after : `None` or `tuple` of `tuple` (`int`, `str`)
        Second url part, whose start is extended by `rule_before`.
Returns
-------
merged_rule : `None` or `tuple` of `tuple` (`int`, `str`)
The merged rule.
"""
if rule_before is None:
return rule_after
if rule_after is None:
return rule_before
if rule_after[0] == DUMMY_RULE_PART:
rule_after = rule_after[1:]
return (*rule_before, *rule_after) | 0682734a82b227f746325363652d1c3f378f2e51 | 8,843 |
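# Usage sketch for _merge_url_rule above: when either side is missing, the other is
# returned unchanged (the full merge path also relies on the module's DUMMY_RULE_PART
# sentinel, which is defined elsewhere).
assert _merge_url_rule(None, ((0, 'users'),)) == ((0, 'users'),)
assert _merge_url_rule(((0, 'api'),), None) == ((0, 'api'),)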
import numpy as np
from graphviz import Digraph

def create_graphic_model(nodes, edges, gtype):
"""
Create a graphic model given nodes and edges
Parameters
----------
nodes : dict
for each node {key, text, math}
edges : dict
for each edge {key, text, math}
gtype : str [default="text"]
"text" for a verbose version, "math" for a compact version
"""
mod = Digraph()
if gtype == "math":
tindx = 1
else:
tindx = 0
for ckey in nodes.keys():
if ckey == "Like":
cstyle = "filled"
else:
cstyle = None
mod.node(ckey, nodes[ckey][tindx], style=cstyle)
for ckey in edges.keys():
for cval in np.atleast_1d(edges[ckey]):
mod.edge(ckey, cval)
return mod | 028c740cc7fa003642815a8ec0f27154fc6e0dab | 8,845 |
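# Usage sketch for create_graphic_model above (hypothetical model): a likelihood node
# fed by two parameters, rendered with the verbose "text" labels.
nodes = {
    "mu": ("mean parameter", r"$\mu$"),
    "sigma": ("scale parameter", r"$\sigma$"),
    "Like": ("likelihood", r"$\mathcal{N}$"),
}
edges = {"mu": "Like", "sigma": "Like"}
dot = create_graphic_model(nodes, edges, "text")
# dot.render("model") writes the graph to disk (requires the graphviz binaries)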
import numpy as np

def zero_cross_bounds(arr, dim, num_cross):
"""Find the values bounding an array's zero crossing."""
sign_switch = np.sign(arr).diff(dim)
switch_val = arr[dim].where(sign_switch, drop=True)[num_cross]
lower_bound = max(0.999*switch_val, np.min(arr[dim]))
upper_bound = min(1.001*switch_val, np.max(arr[dim]))
return arr.sel(**{dim: [lower_bound, upper_bound], "method": "backfill"}) | 52d3431c32f61f47223fdccf4c5a85a92589534f | 8,846 |
from dataclasses import replace
# ST_Type, ST_SSeq, ST_SSeq_Tuple, and is_nested are assumed to come from the
# hosting space-time types module.

def remove_tseqs(t: ST_Type) -> ST_Type:
"""
Get just the sseqs and the non-nested types, removing the tseqs
"""
if type(t) == ST_SSeq or type(t) == ST_SSeq_Tuple:
inner_tseqs_removed = remove_tseqs(t.t)
return replace(t, t=inner_tseqs_removed)
elif is_nested(t):
return remove_tseqs(t.t)
else:
return t | 323f9cd3c007c1decf11653091f641dc453d32cb | 8,847 |
def prod_cart(in_list_1: list, in_list_2: list) -> list:
"""
    Compute the cartesian product of two lists.
    :param in_list_1: the first list to be evaluated
    :param in_list_2: the second list to be evaluated
    :return: the cartesian product result as [[x, y], ...]
_list = []
for element_1 in in_list_1:
for element_2 in in_list_2:
_list.append([element_1,element_2])
return _list | 9fdbfc558f5ec3b11c78535b9125e0a1c293035e | 8,848 |
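# Usage sketch for prod_cart above:
pairs = prod_cart([1, 2], ["a", "b"])
# pairs == [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']]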