content (stringlengths 35-762k) | sha1 (stringlengths 40-40) | id (int64 0-3.66M) |
---|---|---|
import numpy as np
def get_contour_verts(cn):
"""unpack the SVM contour values"""
contours = []
# for each contour line
for cc in cn.collections:
paths = []
# for each separate section of the contour line
for pp in cc.get_paths():
xy = []
# for each segment of that section
for vv in pp.iter_segments():
xy.append(vv[0])
paths.append(np.vstack(xy))
contours.append(paths)
return contours | 93dc98e758aca4390adf75afa7ef9bede2d2ac1a | 3,658,200 |
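A minimal usage sketch with hypothetical data, assuming get_contour_verts above is in scope and a Matplotlib version below 3.10 (where ContourSet still exposes .collections):

import numpy as np
import matplotlib.pyplot as plt

xx, yy = np.meshgrid(np.linspace(-2, 2, 50), np.linspace(-2, 2, 50))
cn = plt.contour(xx, yy, xx ** 2 + yy ** 2, levels=[0.5, 1.0, 2.0])
verts = get_contour_verts(cn)
print(len(verts), verts[0][0].shape)  # 3 contour levels, each a list of (N, 2) vertex arrays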
def play(data_type, stream_id, name, start=-2, duration=-1, reset=False):
"""
    Construct a 'play' message to start receiving audio/video data from publishers on the server.
    :param data_type: int the RTMP datatype.
    :param stream_id: int the stream on which the message will be sent.
:param name: str the name of the stream that is published/recorded on the server.
:param start: N/A.
:param duration: N/A.
:param reset: N/A.
"""
# TODO: Add start, duration, reset(?) Will it work with 'play'?
msg = {'msg': data_type,
'stream_id': stream_id,
'command': [u'play', 0, None, u'' + str(name)]}
return msg | d7c28ba7444e6774427f89d30887cbad97b01cb2 | 3,658,201 |
import numpy as np
import scipy.linalg as sl
def _compute_covariances(precisions_chol):
    """Compute covariances from Cholesky decomposition of the precision matrices.
Parameters
----------
precisions_chol : array-like, shape (n_components, n_features, n_features)
The Cholesky decomposition of the sample precisions.
Returns
-------
covariances : array-like
The covariance matrices corresponding to the given precision
matrices.
"""
n_components, n_features, _ = precisions_chol.shape
covariances = np.empty((n_components, n_features, n_features))
for k, prec_chol in enumerate(precisions_chol):
cov_chol = sl.solve_triangular(prec_chol,
np.eye(n_features), lower=True).T
covariances[k] = np.dot(cov_chol, cov_chol.T)
return covariances | 4268a807ed2d6a61e69bd2f07ebbdbf332e030da | 3,658,202 |
from math import pi as PI
def rad2deg(angle):
"""
Convert radian to degree.
Parameters
----------
angle : float
Angle in radians
Returns
-------
degree : float
Angle in degrees
"""
return (180./PI) * angle | dea3270a96cf82bb136ce4f6e873617245a4bac3 | 3,658,203 |
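A quick sanity check, assuming the math import added above:

print(round(rad2deg(PI), 6))      # 180.0
print(round(rad2deg(PI / 2), 6))  # 90.0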
import csv
def parse_kinetics_splits(level):
"""Parse Kinetics-400 dataset into "train", "val", "test" splits.
Args:
level (int): Directory level of data. 1 for the single-level directory,
2 for the two-level directory.
Returns:
list: "train", "val", "test" splits of Kinetics-400.
"""
def convert_label(s, keep_whitespaces=False):
"""Convert label name to a formal string.
Remove redundant '"' and convert whitespace to '_'.
Args:
s (str): String to be converted.
keep_whitespaces(bool): Whether to keep whitespace. Default: False.
Returns:
str: Converted string.
"""
if not keep_whitespaces:
return s.replace('"', '').replace(' ', '_')
else:
return s.replace('"', '')
def line_to_map(x, test=False):
"""A function to map line string to vid and label.
Args:
x (str): A single line from Kinetics-400 csv file.
test (bool): Indicate whether the line comes from test
annotation file.
Returns:
tuple[str, str]: (vid, label), vid is the video id,
label is the video label.
"""
if test:
# vid = f'{x[0]}_{int(x[1]):06d}_{int(x[2]):06d}'
vid = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}'
label = -1 # label unknown
return vid, label
else:
vid = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}'
if level == 2:
vid = f'{convert_label(x[0])}/{vid}'
else:
assert level == 1
label = class_mapping[convert_label(x[0])]
return vid, label
train_file = 'data/kinetics400/annotations/kinetics_train.csv'
val_file = 'data/kinetics400/annotations/kinetics_val.csv'
test_file = 'data/kinetics400/annotations/kinetics_test.csv'
csv_reader = csv.reader(open(train_file))
# skip the first line
next(csv_reader)
labels_sorted = sorted(set([convert_label(row[0]) for row in csv_reader]))
class_mapping = {label: i for i, label in enumerate(labels_sorted)}
csv_reader = csv.reader(open(train_file))
next(csv_reader)
train_list = [line_to_map(x) for x in csv_reader]
csv_reader = csv.reader(open(val_file))
next(csv_reader)
val_list = [line_to_map(x) for x in csv_reader]
csv_reader = csv.reader(open(test_file))
next(csv_reader)
test_list = [line_to_map(x, test=True) for x in csv_reader]
splits = ((train_list, val_list, test_list), )
return splits | ee2521919f9f9c3f499cd28bc6003528eb402d2b | 3,658,204 |
def rotateContoursAbout(contours, about, degrees=90, ccw=True):
"""\
Rotate the given contours the given number of degrees about the point about
in a clockwise or counter-clockwise direction.
"""
rt = Transform.rotationAbout(about, degrees, ccw)
return rt.applyToContours(contours) | c929a8c412f4b3fe9b70c21dde62b0672f575abc | 3,658,205 |
import torch
def coordinate_addition(v, b, h, w, A, B, psize):
"""
Shape:
Input: (b, H*W*A, B, P*P)
Output: (b, H*W*A, B, P*P)
"""
assert h == w
v = v.view(b, h, w, A, B, psize)
coor = torch.arange(h, dtype=torch.float32) / h
coor_h = torch.cuda.FloatTensor(1, h, 1, 1, 1, psize).fill_(0.)
coor_w = torch.cuda.FloatTensor(1, 1, w, 1, 1, psize).fill_(0.)
coor_h[0, :, 0, 0, 0, 0] = coor
coor_w[0, 0, :, 0, 0, 1] = coor
v = v + coor_h + coor_w
v = v.view(b, h * w * A, B, psize)
return v | 9eeb906539a61b887216c59faf3ac2928e999d6c | 3,658,206 |
import uuid
def ticket() -> str:
"""生成请求饿百接口所需的ticket参数"""
return str(uuid.uuid1()).upper() | aaf1135d6ef5e61aa65960c5c38007848cbd0b17 | 3,658,207 |
import numpy as np
import qiskit
def create_WchainCNOT_layered_ansatz(qc: qiskit.QuantumCircuit,
thetas: np.ndarray,
num_layers: int = 1):
"""Create WchainCNOT layered ansatz
Args:
- qc (qiskit.QuantumCircuit): init circuit
- thetas (np.ndarray): parameters
        - num_layers (int): number of layers
Returns:
- qiskit.QuantumCircuit
"""
n = qc.num_qubits
    if not isinstance(num_layers, int):
num_layers = (num_layers['num_layers'])
if len(thetas) != num_layers * (n * 3):
raise Exception(
'Number of parameters must be equal n_layers * num_qubits * 3')
for i in range(0, num_layers):
phis = thetas[i * (n * 3):(i + 1) * (n * 3)]
qc = create_WchainCNOT(qc)
qc.barrier()
qc = create_rz_nqubit(qc, phis[:n])
qc = create_rx_nqubit(qc, phis[n:n * 2])
qc = create_rz_nqubit(qc, phis[n * 2:n * 3])
return qc | 488950214a26cdcf1812524511561d19baa9dfc9 | 3,658,208 |
import itertools
def birth_brander():
""" This pipeline operator will add or update a "birth" attribute for
passing individuals.
If the individual already has a birth, just let it float by with the
original value. If it doesn't, assign the individual the current birth
ID, and then increment the global, stored birth count.
We don't increment a birth ID in the ctor because that overall birth
count will bloat due to clone operations. Inserting this operator into
the pipeline will ensure that each individual that passes through is
properly "branded" with a unique birth ID. However, care must be made to
ensure that the initial population is similarly branded.
Provides:
* brand_population() to brand an entire population all at once,
which is useful for branding initial populations.
* brand() for explicitly branding a single individual
:param next_thing: preceding individual in the pipeline
:return: branded individual
"""
# incremented with each birth
num_births = itertools.count()
# sometimes next_thing is a population, so we need this to track that
# the next individual in the population
iterator = None
def brand(individual):
""" brand the given individual
:param individual: to be branded
:return: branded individual
"""
if not hasattr(individual, "birth"):
# Only assign a birth ID if they don't already have one
individual.birth = next(num_births)
return individual
def brand_population(population):
""" We want to brand an entire population in one go
Usually used to brand an initial population is one shot.
:param population: to be branded
:return: branded population
"""
return [brand(i) for i in population]
def do_birth_branding(next_thing):
""" This has the flexibility of being inserted in a pipeline such that
the preceding pipeline is a population or a generator that provides
an individual. It'll flexibly handle either situation.
:param next_thing: either the next individual in the pipeline or a population of individuals to be branded
:return: branded individual
"""
nonlocal num_births
nonlocal iterator
while True:
if is_iterable(next_thing):
# We're being passed in a single individual in a pipeline
next_thing = next(next_thing)
else:
# We're being passed a test_sequence/population
if iterator is None:
iterator = iter(next_thing)
next_thing = next(iterator)
next_thing = brand(next_thing)
yield next_thing
do_birth_branding.brand_population = brand_population
return do_birth_branding | dd2c1ef2e9ac2f56436e10829ca9c0685439ce6d | 3,658,209 |
import numpy as np
from scipy import ndimage
def find_center_vo(tomo, ind=None, smin=-50, smax=50, srad=6, step=0.5,
ratio=0.5, drop=20, smooth=True):
"""
Find rotation axis location using Nghia Vo's method. :cite:`Vo:14`.
Parameters
----------
tomo : ndarray
3D tomographic data.
ind : int, optional
Index of the slice to be used for reconstruction.
smin, smax : int, optional
Coarse search radius. Reference to the horizontal center of the sinogram.
srad : float, optional
Fine search radius.
step : float, optional
Step of fine searching.
ratio : float, optional
The ratio between the FOV of the camera and the size of object.
It's used to generate the mask.
drop : int, optional
Drop lines around vertical center of the mask.
smooth : bool, optional
Whether to apply additional smoothing or not.
Returns
-------
float
Rotation axis location.
"""
tomo = dtype.as_float32(tomo)
if ind is None:
ind = tomo.shape[1] // 2
_tomo = tomo[:, ind, :]
# Reduce noise by smooth filters. Use different filters for coarse and fine search
_tomo_cs = ndimage.filters.gaussian_filter(_tomo, (3, 1)) if smooth else _tomo
_tomo_fs = ndimage.filters.median_filter(_tomo, (2, 2)) if smooth else _tomo
# Coarse and fine searches for finding the rotation center.
if _tomo.shape[0] * _tomo.shape[1] > 4e6: # If data is large (>2kx2k)
_tomo_coarse = downsample(np.expand_dims(_tomo_cs,1), level=2)[:, 0, :]
init_cen = _search_coarse(_tomo_coarse, smin / 4.0, smax / 4.0, ratio, drop)
fine_cen = _search_fine(_tomo_fs, srad, step, init_cen*4, ratio, drop)
else:
init_cen = _search_coarse(_tomo_cs, smin, smax, ratio, drop)
fine_cen = _search_fine(_tomo_fs, srad, step, init_cen, ratio, drop)
logger.debug('Rotation center search finished: %i', fine_cen)
return fine_cen | ff943940a133b88686c6d40cdc09886b050bf181 | 3,658,210 |
import numpy as np
def random_fit_nonnegative(values, n):
"""
Generates n random values using a normal distribution fitted from values used as argument.
Returns only non-negative values.
    :param values: array/list to use as a model for the random data
:param n: number of random elements to return
:returns: an array of n random non-negative numbers
"""
values = np.array(values)
mean = np.mean(values)
sd = np.std(values)
random_values = np.empty(0)
offset = 0.05 # 5% offset to compensate values less than 0
while len(random_values) < n:
random_values = np.round(np.random.normal(mean, sd, round(n * (1 + offset))))
random_values = random_values[random_values >= 0]
# If the while loop check fail, next time will try with a larger offset
offset *= 2
# slice n first elements and shape the array to int
return random_values[:n].astype("int") | 9591b87b36c668681873fda1969d1710e7a2dd8b | 3,658,211 |
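A minimal usage sketch; the output is random, so only the shape and non-negativity are guaranteed:

import numpy as np

observed = [3, 5, 4, 6, 5, 7, 2]
samples = random_fit_nonnegative(observed, 10)
print(samples.shape, bool(np.all(samples >= 0)))  # (10,) True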
import numpy as np
def connected_components(weak_crossings=None,
strong_crossings=None,
probe_adjacency_list=None,
join_size=None,
channels=None):
"""Find all connected components in binary arrays of threshold crossings.
Parameters
----------
weak_crossings : array
`(n_samples, n_channels)` array with weak threshold crossings
strong_crossings : array
`(n_samples, n_channels)` array with strong threshold crossings
probe_adjacency_list : dict
A dict `{channel: [neighbors]}`
channels : array
An (n_channels,) array with a list of all non-dead channels
join_size : int
The number of samples defining the tolerance in time for
finding connected components
Returns
-------
A list of lists of pairs `(samp, chan)` of the connected components in
the 2D array `weak_crossings`, where a pair is adjacent if the samples are
within `join_size` of each other, and the channels are adjacent in
`probe_adjacency_list`, the channel graph.
Note
----
The channel mapping assumes that column #i in the data array is channel #i
in the probe adjacency graph.
"""
if probe_adjacency_list is None:
probe_adjacency_list = {}
if channels is None:
channels = []
# If the channels aren't referenced at all but exist in 'channels', add a
# trivial self-connection so temporal floodfill will work. If this channel
# is dead, it should be removed from 'channels'.
probe_adjacency_list.update({i: {i} for i in channels
if not probe_adjacency_list.get(i)})
# Make sure the values are sets.
probe_adjacency_list = {c: set(cs)
for c, cs in probe_adjacency_list.items()}
if strong_crossings is None:
strong_crossings = weak_crossings
assert weak_crossings.shape == strong_crossings.shape
# Set of connected component labels which contain at least one strong
# node.
strong_nodes = set()
n_s, n_ch = weak_crossings.shape
join_size = int(join_size or 0)
# An array with the component label for each node in the array
label_buffer = np.zeros((n_s, n_ch), dtype=np.int32)
# Component indices, a dictionary with keys the label of the component
# and values a list of pairs (sample, channel) belonging to that component
comp_inds = {}
# mgraph is the channel graph, but with edge node connected to itself
# because we want to include ourself in the adjacency. Each key of the
# channel graph (a dictionary) is a node, and the value is a set of nodes
# which are connected to it by an edge
mgraph = {}
for source, targets in probe_adjacency_list.items():
# we add self connections
mgraph[source] = targets.union([source])
# Label of the next component
c_label = 1
# For all pairs sample, channel which are nonzero (note that numpy .nonzero
# returns (all_i_s, all_i_ch), a pair of lists whose values at the
# corresponding place are the sample, channel pair which is nonzero. The
# lists are also returned in sorted order, so that i_s is always increasing
# and i_ch is always increasing for a given value of i_s. izip is an
# iterator version of the Python zip function, i.e. does the same as zip
# but quicker. zip(A,B) is a list of all pairs (a,b) with a in A and b in B
# in order (i.e. (A[0], B[0]), (A[1], B[1]), .... In conclusion, the next
# line loops through all the samples i_s, and for each sample it loops
# through all the channels.
for i_s, i_ch in zip(*weak_crossings.nonzero()):
# The next two lines iterate through all the neighbours of i_s, i_ch
# in the graph defined by graph in the case of edges, and
# j_s from i_s-join_size to i_s.
for j_s in range(i_s - join_size, i_s + 1):
# Allow us to leave out a channel from the graph to exclude bad
# channels
if i_ch not in mgraph:
continue
for j_ch in mgraph[i_ch]:
# Label of the adjacent element.
adjlabel = label_buffer[j_s, j_ch]
# If the adjacent element is nonzero we need to do something.
if adjlabel:
curlabel = label_buffer[i_s, i_ch]
if curlabel == 0:
# If current element is still zero, we just assign
# the label of the adjacent element to the current one.
label_buffer[i_s, i_ch] = adjlabel
# And add it to the list for the labelled component.
comp_inds[adjlabel].append((i_s, i_ch))
elif curlabel != adjlabel:
# If the current element is unequal to the adjacent
# one, we merge them by reassigning the elements of the
# adjacent component to the current one.
# samps_chans is an array of pairs sample, channel
# currently assigned to component adjlabel.
samps_chans = np.array(comp_inds[adjlabel],
dtype=np.int32)
# samps_chans[:, 0] is the sample indices, so this
# gives only the samp,chan pairs that are within
# join_size of the current point.
# TODO: is this the right behaviour? If a component can
# have a width bigger than join_size I think it isn't!
samps_chans = samps_chans[i_s - samps_chans[:, 0] <=
join_size]
# Relabel the adjacent samp,chan points with current
# label.
samps, chans = samps_chans[:, 0], samps_chans[:, 1]
label_buffer[samps, chans] = curlabel
# Add them to the current label list, and remove the
# adjacent component entirely.
comp_inds[curlabel].extend(comp_inds.pop(adjlabel))
# Did not deal with merge condition, now fixed it
# seems...
# WARNING: might this "in" incur a performance hit
# here...?
if adjlabel in strong_nodes:
strong_nodes.add(curlabel)
strong_nodes.remove(adjlabel)
# NEW: add the current component label to the set of all
# strong nodes, if the current node is strong.
if curlabel > 0 and strong_crossings[i_s, i_ch]:
strong_nodes.add(curlabel)
if label_buffer[i_s, i_ch] == 0:
# If nothing is adjacent, we have the beginnings of a new
# component, # so we label it, create a new list for the new
# component which is given label c_label,
# then increase c_label for the next new component afterwards.
label_buffer[i_s, i_ch] = c_label
comp_inds[c_label] = [(i_s, i_ch)]
if strong_crossings[i_s, i_ch]:
strong_nodes.add(c_label)
c_label += 1
# Only return the values, because we don't actually need the labels.
comps = [comp_inds[key] for key in comp_inds.keys() if key in strong_nodes]
return comps | d1d4c3393a69e0a30a3bdf2328dd6535aee699d0 | 3,658,212 |
import numpy as np
def channel_values(channel_freqs, channel_samples, dt, t):
"""Computes value of channels with given frequencies, samples, sample size and current time.
Args:
channel_freqs (array): 1d array of channel frequencies
channel_samples (array): 2d array of channel samples, the first index being time step and
the second index indexing channel
dt (float): size of each sample
t (float): current time
Returns:
array: array of channel values at the given time
"""
sample_idx = int(t // dt)
if sample_idx >= len(channel_samples):
sample_idx = len(channel_samples) - 1
sample_vals = channel_samples[sample_idx]
return np.real(sample_vals * np.exp(1j * 2 * np.pi * channel_freqs * t)) | cdc555a5ab2d21c0dba71f2f24386144796898c1 | 3,658,213 |
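A small usage sketch with made-up channel data, assuming channel_values above is in scope:

import numpy as np

channel_freqs = np.array([5.0e6, 5.1e6])    # two drive channels (Hz)
channel_samples = 0.5 * np.ones((100, 2))   # 100 samples for each channel
dt = 1.0e-9                                 # sample size of 1 ns
print(channel_values(channel_freqs, channel_samples, dt, t=2.5e-9))  # array of 2 channel values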
import os
import nibabel as nb
import numpy as np
from scipy.fftpack import fft, ifft
def bandpass_voxels(realigned_file, bandpass_freqs, sample_period = None):
"""
Performs ideal bandpass filtering on each voxel time-series.
Parameters
----------
realigned_file : string
Path of a realigned nifti file.
bandpass_freqs : tuple
Tuple containing the bandpass frequencies. (LowCutoff, HighCutoff)
sample_period : float, optional
Length of sampling period in seconds. If not specified,
this value is read from the nifti file provided.
Returns
-------
bandpassed_file : string
Path of filtered output (nifti file).
"""
def ideal_bandpass(data, sample_period, bandpass_freqs):
#Derived from YAN Chao-Gan 120504 based on REST.
# sample_period = T
# LowCutoff = 10.
# HighCutoff = 15.
# data = x
def nextpow2(n):
x = np.log2(n)
            return int(2 ** np.ceil(x))
sample_freq = 1./sample_period
sample_length = data.shape[0]
data_p = np.zeros(nextpow2(sample_length))
data_p[:sample_length] = data
LowCutoff, HighCutoff = bandpass_freqs
if(LowCutoff is None): #No lower cutoff (low-pass filter)
low_cutoff_i = 0
elif(LowCutoff > sample_freq/2.): #Cutoff beyond fs/2 (all-stop filter)
low_cutoff_i = int(data_p.shape[0]/2)
else:
low_cutoff_i = np.ceil(LowCutoff*data_p.shape[0]*sample_period).astype('int')
        if (HighCutoff is None) or (HighCutoff > sample_freq/2.): #Cutoff beyond fs/2 or unspecified (becomes a high-pass filter)
high_cutoff_i = int(data_p.shape[0]/2)
else:
high_cutoff_i = np.fix(HighCutoff*data_p.shape[0]*sample_period).astype('int')
freq_mask = np.zeros_like(data_p, dtype='bool')
freq_mask[low_cutoff_i:high_cutoff_i+1] = True
freq_mask[data_p.shape[0]-high_cutoff_i:data_p.shape[0]+1-low_cutoff_i] = True
f_data = fft(data_p)
f_data[freq_mask != True] = 0.
data_bp = np.real_if_close(ifft(f_data)[:sample_length])
return data_bp
nii = nb.load(realigned_file)
data = nii.get_data().astype('float64')
mask = (data != 0).sum(-1) != 0
Y = data[mask].T
Yc = Y - np.tile(Y.mean(0), (Y.shape[0], 1))
if not sample_period:
hdr = nii.get_header()
sample_period = float(hdr.get_zooms()[3])
# Sketchy check to convert TRs in millisecond units
if sample_period > 20.0:
sample_period /= 1000.0
    print('Frequency filtering using sample period: ', sample_period, 'sec')
Y_bp = np.zeros_like(Y)
for j in range(Y.shape[1]):
Y_bp[:,j] = ideal_bandpass(Yc[:,j], sample_period, bandpass_freqs)
data[mask] = Y_bp.T
img = nb.Nifti1Image(data, header=nii.get_header(), affine=nii.get_affine())
bandpassed_file = os.path.join(os.getcwd(), 'bandpassed_demeaned_filtered.nii.gz')
img.to_filename(bandpassed_file)
return bandpassed_file | 772ae195007347ba24670814d907284180a33225 | 3,658,214 |
def get_small_corpus(num=10000):
"""
获取小型文本库,用于调试网络模型
:param num: 文本库前n/2条对联
:return: 默认返回前500条对联(1000句话)的list
"""
list = getFile('/total_list.json')
return list[:num] | 032ea34eaa6b5e1478e3770c91fa3da3214d907b | 3,658,215 |
import pandas as pd
from tqdm import tqdm_notebook
def groupby_apply2(df_1, df_2, cols, f, tqdn=True):
"""Apply a function `f` that takes two dataframes and returns a dataframe.
Groups inputs by `cols`, evaluates for each group, and concatenates the result.
"""
d_1 = {k: v for k,v in df_1.groupby(cols)}
d_2 = {k: v for k,v in df_2.groupby(cols)}
if tqdn:
progress = tqdm_notebook
else:
progress = lambda x: x
arr = []
for k in progress(d_1):
arr.append(f(d_1[k], d_2[k]))
return pd.concat(arr) | 082ce61477c116ac421ab086e68b040dfc04ffff | 3,658,216 |
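A minimal sketch with hypothetical frames; tqdn=False avoids the notebook progress bar, and both inputs must share the same group keys:

import pandas as pd

df_a = pd.DataFrame({'group': ['x', 'x', 'y'], 'a': [1, 2, 3]})
df_b = pd.DataFrame({'group': ['x', 'y', 'y'], 'b': [10, 20, 30]})
merged = groupby_apply2(df_a, df_b, 'group',
                        lambda d1, d2: d1.merge(d2, on='group'), tqdn=False)
print(merged)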
import datetime
import pytz
def login(request):
"""Logs in the user if given credentials are valid"""
username = request.data['username']
password = request.data['password']
try:
user = User.objects.get(username=username)
except:
user = None
if user is not None:
encoded = user.password
hasher = PBKDF2PasswordHasher()
login_valid = hasher.verify(password, encoded)
if login_valid:
key = username + str(datetime.datetime.now())
key = hasher.encode(key, 'key', 10)
life = datetime.datetime.now() + datetime.timedelta(hours=14)
timezone = pytz.timezone("America/Bogota")
life_aware = timezone.localize(life)
loginsession = LoginSession(key=key, life=life_aware, user=user)
loginsession.save()
request.session['loginsession'] = key
data = {
'success': True,
'key': key
}
return Response(data, status=status.HTTP_200_OK, content_type='application/json')
data = {
'success': False,
'message':"Nombre de usuario o contraseña incorrectos"
}
return Response(data, status=status.HTTP_200_OK, content_type='application/json') | 79add2a805a36cd3339aeecafb7d0af95e42d2e5 | 3,658,217 |
def replace_data_in_gbq_table(project_id, table_id, complete_dataset):
""" replacing data in Google Cloud Table """
complete_dataset.to_gbq(
destination_table=table_id,
project_id=project_id,
credentials=credentials,
if_exists="replace",
)
return None | 1de5464cdce77f94857abe46a93f7b64f5e2dd1e | 3,658,218 |
def default_pubkey_inner(ctx):
"""Default expression for "pubkey_inner": tap.inner_pubkey."""
return get(ctx, "tap").inner_pubkey | 9333a62c0111f28e71c202b5553d7f2a8c4f71ce | 3,658,219 |
import tensorflow as tf
def quantize_8(image):
"""Converts and quantizes an image to 2^8 discrete levels in [0, 1]."""
q8 = tf.image.convert_image_dtype(image, tf.uint8, saturate=True)
return tf.cast(q8, tf.float32) * (1.0 / 255.0) | d822ff34b9941c6a812a69766de3483c2348e7da | 3,658,220 |
def get_clients( wlc, *vargs, **kvargs ):
"""
create a single dictionary containing information
about all associated stations.
"""
rsp = wlc.rpc.get_stat_user_session_status()
ret_data = {}
for session in rsp.findall('.//USER-SESSION-STATUS'):
locstat = session.find('.//USER-LOCATION-MEMBER')
ret_data[session.get('mac-addr')] = dict(session.attrib)
ret_data[session.get('mac-addr')].update(locstat.attrib)
return ret_data | c4ab5941033632d7f2b95bc23878f0464d12adb7 | 3,658,221 |
def coalmine(eia923_dfs, eia923_transformed_dfs):
"""Transforms the coalmine_eia923 table.
Transformations include:
* Remove fields implicated elsewhere.
* Drop duplicates with MSHA ID.
Args:
eia923_dfs (dict): Each entry in this dictionary of DataFrame objects
corresponds to a page from the EIA923 form, as reported in the Excel
spreadsheets they distribute.
eia923_transformed_dfs (dict): A dictionary of DataFrame objects in which pages
from EIA923 form (keys) correspond to normalized DataFrames of values from
that page (values).
Returns:
dict: eia923_transformed_dfs, a dictionary of DataFrame objects in which pages
from EIA923 form (keys) correspond to normalized DataFrames of values from that
page (values).
"""
# These are the columns that we want to keep from FRC for the
# coal mine info table.
coalmine_cols = ['mine_name',
'mine_type',
'state',
'county_id_fips',
'mine_id_msha']
# Make a copy so we don't alter the FRC data frame... which we'll need
# to use again for populating the FRC table (see below)
cmi_df = eia923_dfs['fuel_receipts_costs'].copy()
# Keep only the columns listed above:
cmi_df = _coalmine_cleanup(cmi_df)
cmi_df = cmi_df[coalmine_cols]
# If we actually *have* an MSHA ID for a mine, then we have a totally
# unique identifier for that mine, and we can safely drop duplicates and
# keep just one copy of that mine, no matter how different all the other
# fields associated with the mine info are... Here we split out all the
# coalmine records that have an MSHA ID, remove them from the CMI
# data frame, drop duplicates, and then bring the unique mine records
# back into the overall CMI dataframe...
cmi_with_msha = cmi_df[cmi_df['mine_id_msha'] > 0]
cmi_with_msha = cmi_with_msha.drop_duplicates(subset=['mine_id_msha', ])
    cmi_df = cmi_df.drop(cmi_df[cmi_df['mine_id_msha'] > 0].index)
    cmi_df = cmi_df.append(cmi_with_msha)
cmi_df = cmi_df.drop_duplicates(subset=['mine_name',
'state',
'mine_id_msha',
'mine_type',
'county_id_fips'])
# drop null values if they occur in vital fields....
cmi_df.dropna(subset=['mine_name', 'state'], inplace=True)
# we need an mine id to associate this coalmine table with the frc
# table. In order to do that, we need to create a clean index, like
# an autoincremeted id column in a db, which will later be used as a
# primary key in the coalmine table and a forigen key in the frc table
# first we reset the index to get a clean index
cmi_df = cmi_df.reset_index()
# then we get rid of the old index
cmi_df = cmi_df.drop(labels=['index'], axis=1)
# then name the index id
cmi_df.index.name = 'mine_id_pudl'
# then make the id index a column for simpler transferability
cmi_df = cmi_df.reset_index()
cmi_df = PUDL_META.get_resource("coalmine_eia923").encode(cmi_df)
eia923_transformed_dfs['coalmine_eia923'] = cmi_df
return eia923_transformed_dfs | eb420428dcb2dceeeab1c5bbdceee7c7da2e5c11 | 3,658,222 |
import random
def _throw_object_x_at_y():
"""
Interesting interactions:
* If anything is breakable
:return:
"""
all_pickupable_objects_x = env.all_objects_with_properties({'pickupable': True})
x_weights = [10.0 if (x['breakable'] or x['mass'] > 4.0) else 1.0 for x in all_pickupable_objects_x]
if len(all_pickupable_objects_x) == 0:
raise ValueError('No pickupable objects')
all_objects_y = env.all_objects_with_properties({'pickupable': True})
y_weights = [10.0 if (y['breakable'] and not y['pickupable']) else (
4.0 if y['breakable'] else 1.0) for y in all_objects_y]
object_x = all_pickupable_objects_x[_weighted_choice(x_weights)]
object_y = all_objects_y[_weighted_choice(y_weights)]
if object_x['objectId'] == object_y['objectId']:
raise ValueError('objects are the same?')
#####################
hardness_options = {'softly': 10.0, 'normally': 100.0, 'aggressively': 1000.0}
hardness = random.choice(sorted(hardness_options.keys()))
renv = RecordingEnv(env,
text=f'Throw $1 at $2 {hardness}.',
main_object_ids=(object_x['objectId'], object_y['objectId'])
)
s_a = pickup_object(renv, object_x['objectId'], navigate=True)
print("Pickup {} succeeds".format(object_x['objectId']), flush=True)
path2use = path_to_object(renv.env, object_y, angle_noise=0, dist_to_obj_penalty=0.1)
while len(path2use) > 0 and path2use[-1]['action'].startswith(('Rotate', 'Look')):
path2use.pop(-1)
for p in path2use:
renv.step(p)
# Teleport, throw, then snap back to grid
# Face object
old_pos = renv.env.get_agent_location()
new_pos = {k: v for k, v in old_pos.items()}
new_pos['rotation'] = rotation_angle_to_object(object_y, renv.env.get_agent_location())
new_pos['horizon'] = horizon_angle_to_object(object_y, renv.env.get_agent_location())
renv.env.teleport_agent_to(**new_pos, ignore_y_diffs=True,
only_initially_reachable=False)
if not renv.env.last_action_success:
raise ValueError("teleport failed")
if renv.env.get_agent_location()['y'] < -10:
raise ValueError("negative coords")
s_b = renv.step(dict(action='ThrowObject', moveMagnitude=hardness_options[hardness],
forceAction=True))
# If something broke then things are interesting
is_interesting = s_b and any([(x['isBroken'] or 'Cracked' in x['objectType']) for x in renv.new_items.values()])
renv.env.teleport_agent_to(**old_pos, ignore_y_diffs=True)
return renv, is_interesting | 03f6c6a99754d79d94df5a4f857ae358db663081 | 3,658,223 |
import matplotlib.pyplot as plt
def plot(
X,
color_by=None,
color_map="Spectral",
colors=None,
edges=None,
axis_limits=None,
background_color=None,
marker_size=1.0,
figsize_inches=(8.0, 8.0),
savepath=None,
):
"""Plot an embedding, in one, two, or three dimensions.
This function plots embeddings. The input embedding's dimension should
be at most 3.
The embedding is visualized as a scatter plot. The points can
optionally be colored according to categorical or continuous values,
or according to a pre-defined sequence of colors. Additionally,
edges can optionally be superimposed.
Arguments
---------
X: array-like
The embedding to plot, of shape ``(n_items, embedding_dim)``. The
second dimension should be 1, 2, or 3.
color_by: array-like, optional
A sequence of values, one for each item, which should be
used to color each embedding vector. These values may either
be categorical or continuous. For example, if ``n_items`` is 4,
.. code:: python3
            np.array(['dog', 'cat', 'zebra', 'cat'])
            np.array([0, 1, 1, 2])
            np.array([0.1, 0.5, 0.31, 0.99])
are all acceptable. The first two are treated as categorical,
the third is continuous. A finite number of colors is used
when the values are categorical, while a spectrum of colors is
used when the values are continuous.
color_map: str or matplotlib colormap instance
Color map to use when resolving ``color_by`` to colors; ignored
when ``color_by`` is None.
colors: array-like, optional
A sequence of colors, one for each item, specifying the exact
color each item should be colored. Each row must represent
an RGBA value.
Only one of ``color_by`` and ``colors`` should be non-None.
edges: array-like, optional
List of edges to superimpose over the scatter plot, shape ``(any, 2)``
axis_limits: tuple, optional
tuple ``(limit_low, limit_high)`` of axis limits, applied to both
the x and y axis.
background_color: str, optional
color of background
marker_size: float, optional
size of each point in the scatter plot
figsize_inches: tuple
size of figures in inches: ``(width_inches, height_inches)``
savepath: str, optional
path to save the plot.
Returns
-------
matplotlib.Axes:
Axis on which the embedding is plotted.
"""
if color_by is not None and colors is not None:
raise ValueError("Only one of 'color_by` and `colors` can be non-None")
ax = _plot(
X=X,
color_by=color_by,
cmap=color_map,
colors=colors,
edges=edges,
lim=axis_limits,
background_color=background_color,
s=marker_size,
figsize=figsize_inches,
)
if savepath is not None:
plt.savefig(savepath)
return ax | f6c5ef6084278bcd3eb81c9286af53594aca4a1e | 3,658,224 |
import numpy as np
def CausalConvIntSingle(val, time, kernel):
"""
Computing convolution of time varying data with given kernel function.
"""
ntime = time.size
dt_temp = np.diff(time)
dt = np.r_[time[0], dt_temp]
out = np.zeros_like(val)
for i in range(1, ntime):
temp = 0.
if i==0:
temp += val[0]*kernel(time[i]-time[0])*dt[0]*0.5
for k in range(1,i+1):
temp += val[k-1]*kernel(time[i]-time[k-1])*dt[k]*0.5
temp += val[k]*kernel(time[i]-time[k])*dt[k]*0.5
out[i] = temp
return out | a4e94bfe2213c428042df4e561f584ffede3f9ab | 3,658,225 |
def sbox1(v):
"""AES inverse S-Box."""
w = mpc.to_bits(v)
z = mpc.vector_add(w, B)
y = mpc.matrix_prod([z], A1, True)[0]
x = mpc.from_bits(y)**254
return x | c10e9d440e1c1149c8d2b0f9fbd3fd5d4868596c | 3,658,226 |
import numpy as _np
def _get_photon_info_COS(tag, x1d, traceloc='stsci'):
"""
Add spectral units (wavelength, cross dispersion distance, energy/area)
to the photon table in the fits data unit "tag".
For G230L, you will get several 'xdisp' columns -- one for each segment. This allows for the use of overlapping
background regions.
Parameters
----------
tag
x1d
traceloc
Returns
-------
xdisp, order
"""
if x1d is not None:
xd, xh = x1d[1].data, x1d[1].header
det = tag[0].header['detector']
segment = tag[0].header['segment']
data_list = []
for i,t in enumerate(tag):
if t.name != 'EVENTS': continue
td,th = t.data, t.header
"""
Note: How STScI extracts the spectrum is unclear. Using 'y_lower/upper_outer' from the x1d reproduces the
x1d gross array, but these results in an extraction ribbon that has a varying height and center -- not
the parallelogram that is described in the Data Handbook as of 2015-07-28. The parameters in the
xtractab reference file differ from those populated in the x1d header. So, I've punted and stuck with
using the x1d header parameters because it is easy and I think it will make little difference for most
sources. The largest slope listed in the xtractab results in a 10% shift in the spectral trace over the
length of the detector. In general, I should just check to be sure the extraction regions I'm using are
reasonable.
"""
data = [td[s] for s in ['time', 'wavelength', 'epsilon', 'dq', 'pha']]
if det == 'NUV':
# all "orders" (segments) of the NUV spectra fall on the same detector and are just offset in y,
# I'll just duplicate the events for each spectrum
segs = [s[-1] for s in xd['segment']]
orders = list(range(len(segs)))
else:
seg = segment[-1]
segs = [seg]
orders = [0 if seg == 'A' else 1]
for order, seg in zip(orders, segs):
if not (traceloc == 'stsci' or type(traceloc) in [int, float]) and det == 'NUV':
raise NotImplementedError('NUV detector has multiple traces on the same detector, so custom traceloc '
'has not been implemented.')
if traceloc == 'stsci':
yspec = xh['SP_LOC_'+seg]
elif traceloc == 'median':
Npixx = th['talen2']
x, y = td['xfull'], td['yfull']
yspec = _median_trace(x, y, Npixx, 8)
elif traceloc == 'lya':
Npixy = th['talen3']
yspec = _lya_trace(td['wavelength'], td['yfull'], Npixy)
elif type(traceloc) in [int, float]:
yspec = float(traceloc)
else:
raise ValueError('traceloc={} not recognized.'.format(traceloc))
xdisp = td['yfull'] - yspec
order_vec = _np.ones_like(xdisp, 'i2')*order
if det == 'NUV':
w = data[1]
keep = (xdisp > -15.) & (xdisp < 15.)
x = td['xfull']
xref, wref = x[keep], w[keep]
isort = _np.argsort(xref)
xref, wref = xref[isort], wref[isort]
wnew = _np.interp(x, xref, wref)
data_list.append(data[:1] + [wnew] + data[2:] + [xdisp, order_vec])
else:
data_list.append(data + [xdisp, order_vec])
data = list(map(_np.hstack, list(zip(*data_list))))
return data | d1f06d6b8b26894a3297471c74c291cd15b3cb22 | 3,658,227 |
def maximum_value(tab):
"""
brief: return maximum value of the list
args:
        tab: a list of numeric values; expects at least one positive value
return:
the max value of the list
the index of the max value
raises:
ValueError if expected a list as input
ValueError if no positive value found
"""
if not(isinstance(tab, list)):
raise ValueError('Expected a list as input')
valMax = 0.0
    valMaxIndex = -1
nPositiveValues = 0
for i in range(len(tab)):
if tab[i] >= 0 and tab[i] > valMax:
valMax = float(tab[i])
valMaxIndex = i
nPositiveValues += 1
if nPositiveValues <= 0:
raise ValueError('No positive value found')
return valMax, valMaxIndex | 1c31daf3a953a9d781bc48378ef53323313dc22a | 3,658,228 |
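A quick usage example, assuming the function above is in scope:

val, idx = maximum_value([1.5, -2.0, 3.25, 0.0])
print(val, idx)  # 3.25 2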
import mpmath
def pdf(x, k, loc, scale):
"""
Probability density function for the Weibull distribution (for minima).
This is a three-parameter version of the distribution. The more typical
two-parameter version has just the parameters k and scale.
"""
with mpmath.extradps(5):
x = mpmath.mpf(x)
k, loc, scale = _validate_params(k, loc, scale)
if x == loc:
if k < 1:
return mpmath.mp.inf
elif k == 1:
return 1/scale
else:
return mpmath.mp.zero
if x < loc:
return mpmath.mp.zero
return mpmath.exp(logpdf(x, k, loc, scale)) | 77efc3f7ceda57377a412dc7641114cea3562953 | 3,658,229 |
def add_flight():
"""Allows users to add flights."""
if request.method == "GET":
return render_template("add.html", airports=AIRPORTS)
else:
# Move request.form into a dictionary that's a bit shorter to access than request.form
form = dict(request.form)
# Change hour and minute into integers
form["hour"] = int(form["hour"])
form["minute"] = int(form["minute"])
# TODO: Return error message if hour not in valid 0-23 range
# TODO: Return error message if minute not in valid 0-59 range
# TODO: Return error message if either airport not in AIRPORTS, or if they're equal
# Insert into database
insert_flight(db, form)
# TODO: Redirect user to homepage
return "TODO" | 7ac6d73bddfcba7475ff4e293a93fd3fe1fff546 | 3,658,230 |
import mmcv
import numpy as np
def rescale_column_test(img, img_shape, gt_bboxes, gt_label, gt_num):
"""rescale operation for image of eval"""
img_data, scale_factor = mmcv.imrescale(img, (config.img_width, config.img_height), return_scale=True)
if img_data.shape[0] > config.img_height:
img_data, scale_factor2 = mmcv.imrescale(img_data, (config.img_height, config.img_height), return_scale=True)
scale_factor = scale_factor*scale_factor2
pad_h = config.img_height - img_data.shape[0]
pad_w = config.img_width - img_data.shape[1]
assert ((pad_h >= 0) and (pad_w >= 0))
pad_img_data = np.zeros((config.img_height, config.img_width, 3)).astype(img_data.dtype)
pad_img_data[0:img_data.shape[0], 0:img_data.shape[1], :] = img_data
img_shape = np.append(img_shape, (scale_factor, scale_factor))
img_shape = np.asarray(img_shape, dtype=np.float32)
return (pad_img_data, img_shape, gt_bboxes, gt_label, gt_num) | 11c7be91988aba926e5d9934443545a5112d2525 | 3,658,231 |
from typing import Union
from typing import List
import asyncio
from typing import Set
import os
async def start_workers(
search_requests: Union[List[SearchIndexRequest], asyncio.Queue]
) -> Set[str]:
"""Runs the pipeline using asyncio concurrency with three main coroutines:
- get results: fetch the search requests queue, perform the search and output the results
- download and parse the body: fetch the results queue, download and parse the body and meta from S3
- persist the data: fetch the 'to_persist' queue and serialize the data to ndjson files
"""
results_queue = asyncio.Queue(maxsize=settings.MAX_RESULTS_QUEUE_SIZE)
to_persist_queue = asyncio.Queue(maxsize=settings.MAX_PERSIST_QUEUE_SIZE)
search_end_event = asyncio.Event()
download_end_event = asyncio.Event()
if isinstance(search_requests, asyncio.Queue):
search_requests_queue = search_requests
else:
search_requests_queue = asyncio.Queue()
for request in search_requests:
await search_requests_queue.put(request)
num_search_requests = search_requests_queue.qsize()
logger.info(
f"Starting pipeline. Total of {num_search_requests} search index requests to process."
)
async with create_client() as client:
gateway = CDXGateway(client)
search_indexes = SearchIndexesExecutor(
gateway, results_queue, search_requests_queue
)
search_indexes_task = asyncio.create_task(search_indexes.run())
download_task = asyncio.create_task(
download_executor(
results_queue,
to_persist_queue,
client,
search_end_event,
)
)
store_results_task = asyncio.create_task(
store_results(to_persist_queue, download_end_event, os.getpid())
)
while not search_indexes_task.done():
await asyncio.sleep(1)
logger.debug(
f"Search index requests pending: {search_requests_queue.qsize()}"
)
else:
search_end_event.set()
while not download_task.done():
await asyncio.sleep(1)
else:
download_end_event.set()
while not store_results_task.done():
await asyncio.sleep(1)
for task in [search_indexes_task, download_task, store_results_task]:
exc = task.exception()
if exc:
                logger.exception("Pipeline task failed", exc_info=exc)
logger.info("Pipeline finished, exiting.")
return store_results_task.result() | b0cd669af8ebd7127c5bea4455e7e401e0e3ffd7 | 3,658,232 |
import numpy as np
def resolvability_query(m, walks_):
"""
:param m: cost matrix
:param walks_: list of 0-percolation followed by its index of redundancy
as returned by percolation_finder
:return: M again untouched, followed by the list of $0$-percolation with
minimal index of redundancy, and with a flag, True if the minimal index
is 0 and so we have already our solution, False otherwise.
"""
min_redundancy = np.min(walks_[1::2])
filtered_walks = [walks_[i] for i in list(range(len(walks_)))[::2] \
if walks_[i + 1] == min_redundancy]
if min_redundancy == 0:
flag = True
else:
flag = False
return [m, filtered_walks, flag] | 968acb44a94952187cd91ed50ee2f7c1d1f0f54f | 3,658,233 |
import os
def lock_parent_directory(filename, timeout=10):
"""
Context manager that acquires a lock on the parent directory of the given
file path. This will block until the lock can be acquired, or the timeout
time has expired (whichever occurs first).
:param filename: file path of the parent directory to be locked
:param timeout: timeout (in seconds)
"""
return lock_path(os.path.dirname(filename), timeout=timeout) | 2774c61d30d6844ae7c9fa07dd4df2d0ecac2918 | 3,658,234 |
import math
def dsh(
incidence1: float, solar_az1: float, incidence2: float, solar_az2: float
):
"""Returns the Shadow-Tip Distance (dsh) as detailed in
Becker et al.(2015).
The input angles are assumed to be in radians.
This is defined as the distance between the tips of the shadows
in the two images for a hypothetical vertical post of unit
height. The "shadow length" describes the shadow of a hypothetical
pole so it applies whether there are actually shadows in the
image or not. It's a simple and consistent geometrical way to
quantify the difference in illumination. This quantity is
computed analogously to dp.
"""
def shx(inc: float, sunazgnd: float):
return -1 * math.tan(inc) * math.cos(sunazgnd)
def shy(inc: float, sunazgnd: float):
return math.tan(inc) * math.sin(sunazgnd)
shx1 = shx(incidence1, solar_az1)
shx2 = shx(incidence2, solar_az2)
shy1 = shy(incidence1, solar_az1)
shy2 = shy(incidence2, solar_az2)
return math.sqrt(math.pow(shx1 - shx2, 2) + math.pow(shy1 - shy2, 2)) | 5aef1c9d7ffeb3e8534568a53cf537d26d97324a | 3,658,235 |
import numpy as np
def similarity(vec1, vec2):
"""Cosine similarity."""
return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2)) | 22a97fc08b4a8d7b662d0ba38eb6338aad587ca2 | 3,658,236 |
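For example, with identical and orthogonal vectors:

import numpy as np

print(similarity(np.array([1.0, 0.0]), np.array([1.0, 0.0])))  # 1.0
print(similarity(np.array([1.0, 0.0]), np.array([0.0, 1.0])))  # 0.0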
import _warnings
def epv00(date1, date2):
""" Earth position and velocity, heliocentric and barycentric, with
respect to the Barycentric Celestial Reference System.
:param date1, date2: TDB as a two-part Julian date.
:type date1, date2: float
:returns: a tuple of two items:
* heliocentric Earth position velocity as a numpy.matrix of shape \
2x3.
* barycentric Earth position/velocity as a numpy.matrix of shape \
2x3.
:raises: :exc:`UserWarning` if the date falls outside the range 1900-2100.
.. seealso:: |MANUAL| page 79
"""
pvh = _np.asmatrix(_np.zeros(shape=(2,3), dtype=float, order='C'))
pvb = _np.asmatrix(_np.zeros(shape=(2,3), dtype=float, order='C'))
s = _sofa.iauEpv00(date1, date2, pvh, pvb)
if s != 0:
_warnings.warn(_epv00_msg[s], UserWarning, 2)
return pvh, pvb | bb2c97517966168beb07e1732231bb0388eca0f3 | 3,658,237 |
def algorithm_conflict(old_config, new_config):
"""Generate an algorithm configuration conflict"""
return conflicts.AlgorithmConflict(old_config, new_config) | 8f9a1dcbf90b38efd69e028a35591d4d424d72c4 | 3,658,238 |
def nin():
"""
:return:
"""
def nin_block(num_channels, kernel_size, strides, padding):
blk = nn.Sequential()
blk.add(nn.Conv2D(num_channels, kernel_size, strides, padding, activation='relu'),
nn.Conv2D(num_channels, kernel_size=1, activation='relu'),
nn.Conv2D(num_channels, kernel_size=1, activation='relu'))
return blk
net = nn.Sequential()
net.add(nin_block(96, kernel_size=11, strides=4, padding=0),
nn.MaxPool2D(pool_size=3, strides=2),
nin_block(256, kernel_size=5, strides=1, padding=2),
nn.MaxPool2D(pool_size=3, strides=2),
nin_block(384, kernel_size=3, strides=1, padding=1),
nn.MaxPool2D(pool_size=3, strides=2), nn.Dropout(0.5),
            # the number of label classes is 10
nin_block(10, kernel_size=3, strides=1, padding=1),
            # the global average pooling layer automatically sets the window shape to the input's height and width
nn.GlobalAvgPool2D(),
            # convert the 4-D output into a 2-D output of shape (batch size, 10)
nn.Flatten())
X = nd.random.uniform(shape=(1, 1, 224, 224))
net.initialize()
for layer in net:
X = layer(X)
print(layer.name, 'output shape:\t', X.shape)
lr, num_epochs, batch_size, ctx = 0.1, 5, 128, d2l.try_gpu()
net.initialize(force_reinit=True, ctx=ctx, init=init.Xavier())
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=224)
d2l.train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs) | 8641d6b34256168d09d2ab3c6fa0d4a0aff71410 | 3,658,239 |
def factorial(n):
"""
Return the product of the integers 1 through n.
n must be a nonnegative integer.
"""
return product(range(2, n + 1)) | 132b772d27e661816979ea9a5f2fa3b53114b55c | 3,658,240 |
def get_status_lines(frames, check_transposed=True):
"""
Extract status lines from the given frames.
`frames` can be 2D array (one frame), 3D array (stack of frames, first index is frame number), or list of array.
Automatically check if the status line is present; return ``None`` if it's not.
If ``check_transposed==True``, check for the case where the image is transposed (i.e., line becomes a column).
"""
if isinstance(frames,list):
return [get_status_lines(f,check_transposed=check_transposed) for f in frames]
if frames.shape[-1]>=4:
lines=_extract_line(frames,True)
if _check_magic(lines):
return lines
lines=_extract_line(frames,False)
if _check_magic(lines):
return lines
if check_transposed:
tframes=frames.T if frames.ndim==2 else frames.transpose((0,2,1))
return get_status_lines(tframes,check_transposed=False)
return None | 8bc68246b0c4987836414810a0308a2034b16368 | 3,658,241 |
def quote():
"""Get stock quote."""
if request.method == "POST":
quote = lookup(request.form.get("symbol"))
if quote == None:
return apology("invalid symbol", 400)
return render_template("quoted.html", quote=quote)
# User reached route via GET (as by clicking a link or via redi)
else:
return render_template("quote.html") | fb9d4b54e97a4d7b104f3c0d361347b99db68195 | 3,658,242 |
import json
def stats(api, containers=None, stream=True):
"""Get container stats container
When stream is set to true, the raw HTTPResponse is returned.
"""
path = "/containers/stats"
params = {'stream': stream}
if containers is not None:
params['containers'] = containers
try:
response = api.get(path, params=params)
if stream:
return response
return json.loads(str(response.read(), 'utf-8'))
except errors.NotFoundError as e:
api.raise_not_found(e, e.response, errors.ContainerNotFound) | 8e8da5ab96ab14871e3a5de363d8cae66fba5701 | 3,658,243 |
import tensorflow as tf
def add_engineered(features):
"""Add engineered features to features dict.
Args:
features: dict, dictionary of input features.
Returns:
features: dict, dictionary with engineered features added.
"""
features["londiff"] = features["dropofflon"] - features["pickuplon"]
features["latdiff"] = features["dropofflat"] - features["pickuplat"]
features["euclidean"] = tf.math.sqrt(
features["londiff"]**2 + features["latdiff"]**2)
return features | 56efe3ad922f5068c91ac702366416210e95dd74 | 3,658,244 |
def test_3tris():
"""3 triangles"""
conv = ToPointsAndSegments()
polygons = [
[[(0, 0), (1, 0), (0.5, -0.5), (0, 0)]],
[[(1, 0.5), (2, 0.5), (1.5, 1), (1, 0.5)]],
[[(2, 0), (3, 0), (2.5, -0.5), (2, 0)]],
]
for polygon in polygons:
conv.add_polygon(polygon)
return conv, 24, 16, 8 | fc9504e9c3ca0ae251ed67f8c99530ac6a1de73c | 3,658,245 |
def program_modules_with_functions(module_type, function_templates):
""" list the programs implementing a given set of functions
"""
prog_lsts = [program_modules_with_function(module_type, function_template)
for function_template in function_templates]
# get the intersection of all of them
progs = _reduce(set.intersection, map(set, prog_lsts))
return tuple(sorted(progs)) | c3cfd6ee6c9fdcca3926015016e5d28a2a1f599d | 3,658,246 |
def tasmax_below_tasmin(
tasmax: xarray.DataArray,
tasmin: xarray.DataArray,
) -> xarray.DataArray:
"""Check if tasmax values are below tasmin values for any given day.
Parameters
----------
tasmax : xarray.DataArray
tasmin : xarray.DataArray
Returns
-------
xarray.DataArray, [bool]
Examples
--------
To gain access to the flag_array:
>>> from xclim.core.dataflags import tasmax_below_tasmin
>>> ds = xr.open_dataset(path_to_tas_file)
>>> flagged = tasmax_below_tasmin(ds.tasmax, ds.tasmin)
"""
tasmax_lt_tasmin = _sanitize_attrs(tasmax < tasmin)
description = "Maximum temperature values found below minimum temperatures."
tasmax_lt_tasmin.attrs["description"] = description
tasmax_lt_tasmin.attrs["units"] = ""
return tasmax_lt_tasmin | c112dcf5b1a89f20151daaea8d56ea8b08262886 | 3,658,247 |
import torch
import torch.nn.functional as F
def laplace_attention(q, k, v, scale, normalize):
"""
Laplace exponential attention
Parameters
----------
q : torch.Tensor
Shape (batch_size, m, k_dim)
k : torch.Tensor
Shape (batch_size, n, k_dim)
v : torch.Tensor
Shape (batch_size, n, v_dim)
scale : float
scale in the L1 distance
normalize : bool
does the weights sum to 1?
Returns
-------
r : torch.Tensor
Shape (batch_size, m, v_dim)
"""
k = k.unsqueeze(1) # shape [B, 1, n, k_dim]
q = q.unsqueeze(2) # shape [B, m, 1, k_dim]
unnorm_weights = - torch.abs((k - q) / scale) # shape [B, m, n, k_dim]
    unnorm_weights = torch.mean(unnorm_weights, dim=-1) # shape [B, m, n]
if normalize:
weight_fn = F.softmax
else:
weight_fn = lambda x: 1 + torch.tanh(x)
weights = weight_fn(unnorm_weights) # shape [B, m, n]
r = torch.einsum('bij,bjk->bik', weights, v) # shape [B, m, v_dim]
return r | 600ac2f75e5396dfe6e169776425229ffedbc884 | 3,658,248 |
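A shape-check sketch with random tensors (illustrative sizes only), assuming the fixed function above:

import torch

q = torch.randn(4, 6, 8)    # (batch, m, k_dim)
k = torch.randn(4, 10, 8)   # (batch, n, k_dim)
v = torch.randn(4, 10, 3)   # (batch, n, v_dim)
r = laplace_attention(q, k, v, scale=1.0, normalize=True)
print(r.shape)  # torch.Size([4, 6, 3])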
def instantiate(class_name, *args, **kwargs):
"""Helper to dynamically instantiate a class from a name."""
split_name = class_name.split(".")
module_name = split_name[0]
class_name = ".".join(split_name[1:])
module = __import__(module_name)
class_ = getattr(module, class_name)
return class_(*args, **kwargs) | d5906c835de9c2e86fbe3c15a9236662d6c7815d | 3,658,249 |
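For example (the dotted path is resolved as a top-level module plus an attribute, so one-level paths like the one below work; deeper nesting would need importlib):

counter = instantiate("collections.Counter", "mississippi")
print(counter.most_common(2))  # [('i', 4), ('s', 4)]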
import numpy as np
def fawa(pv_or_state, grid=None, levels=None, interpolate=None):
"""Finite-Amplitude Wave Activity according to Nakamura and Zhu (2010).
- If the first parameter is not a `barotropic.State`, `grid` must be
specified.
- `levels` specifies the number of contours generated for the equivalent
latitude zonalization.
- By default, FAWA is returned on the computed equivalent latitudes. To
obtain FAWA interpolated to a specific set of latitudes, specify these
with the `interpolate` parameter.
Returns a tuple containing FAWA and its latitude coordinates.
"""
grid, pv = _get_grid_vars(["pv"], grid, pv_or_state)
# Compute zonalized background state of PV
qq, yy = grid.zonalize_eqlat(pv, levels=levels, interpolate=None, quad="sptrapz")
# Use formulation that integrates PV over areas north of PV
# contour/equivalent latitude and then computes difference
q_int = np.vectorize(lambda q: grid.quad_sptrapz(pv, pv - q))
y_int = np.vectorize(lambda y: grid.quad_sptrapz(pv, grid.lat - y))
# Normalize by zonal circumference at each latitude
fawa = (q_int(qq) - y_int(yy)) / grid.circumference(yy)
# Interpolate to a given set of latitudes if specified
if interpolate is not None:
fawa = np.interp(interpolate, yy, fawa, left=0, right=0)
yy = interpolate
return fawa, yy | 91ca98bcb5abf71100ec9716f11c5cd38688836d | 3,658,250 |
def bmm_update(context, bmm_id, values, session=None):
"""
Updates Bare Metal Machine record.
"""
if not session:
session = get_session_dodai()
session.begin()
bmm_ref = bmm_get(context, bmm_id, session=session)
bmm_ref.update(values)
bmm_ref.save(session=session)
return bmm_ref | 71c73582c9f6b96ffc5021598c8ef017ccb5af83 | 3,658,251 |
from datetime import datetime
def to_unified(entry):
"""
Convert to a unified entry
"""
assert isinstance(entry, StatementEntry)
date = datetime.datetime.strptime(entry.Date, '%d/%m/%Y').date()
return UnifiedEntry(date, entry.Reference, method=entry.Transaction_Type, credit=entry.Money_In,
debit=entry.Money_Out) | d6eca8cbd970931569a2ad740298578c1106e7c9 | 3,658,252 |
def edit_profile():
"""
POST endpoint that edits the student profile.
"""
user = get_current_user()
json = g.clean_json
user.majors = Major.objects.filter(id__in=json['majors'])
user.minors = Minor.objects.filter(id__in=json['minors'])
user.interests = Tag.objects.filter(id__in=json['interests'])
user.save()
return _fetch_user_profile(user) | 4548c4621f31bbd159535b7ea0768167655b4f5b | 3,658,253 |
import six
def _stringcoll(coll):
"""
Predicate function to determine whether COLL is a non-empty
collection (list/tuple) containing only strings.
Arguments:
- `coll`:*
Return: bool
Exceptions: None
"""
if isinstance(coll, (list, tuple)) and coll:
return len([s for s in coll if isinstance(s, six.string_types)]) == len(coll)
return False | 9490a973900e230f70fea112f250cfe29be3a8bc | 3,658,254 |
import contextlib
def create_user_db_context(
database=Database(),
*args, **kwargs):
"""
Create a context manager for an auto-configured :func:`msdss_users_api.tools.create_user_db_func` function.
Parameters
----------
database : :class:`msdss_base_database:msdss_base_database.core.Database`
Database to use for managing users.
*args, **kwargs
Additional arguments passed to :func:`msdss_users_api.tools.create_user_db_func`.
Return
------
dict
Returns a dictionary with the following keys:
* ``get_user_db_context`` (:func:`contextlib.asynccontextmanager`): function returned from :func:`contextlib.asynccontextmanager` created from an auto-configured :func:`msdss_users_api.tools.create_user_db_func` function
* ``get_user_db`` (func): user db function from :func:`msdss_users_api.tools.create_user_db_func`
* ``async_database`` (:class:`databases:databases.Database`): auto-configured :class:`databases:databases.Database` from env vars
* ``database_engine`` (:class:`sqlalchemy:sqlalchemy.engine.Engine`): auto-configured :class:`sqlalchemy:sqlalchemy.engine.Engine` from env vars
Author
------
Richard Wen <[email protected]>
Example
-------
.. jupyter-execute::
from msdss_users_api.tools import *
results = create_user_db_context()
get_user_db_context = results['get_user_db_context']
async_database = results['async_database']
"""
# (create_user_db_func_db) Create databases
database_engine = database._connection
async_database = databases.Database(str(database_engine.url))
# (get_user_db_context_return) Return user db context
get_user_db = create_user_db_func(database_engine=database_engine, async_database=async_database, *args, **kwargs)
out = dict(
get_user_db_context=contextlib.asynccontextmanager(get_user_db),
get_user_db=get_user_db,
async_database=async_database,
database_engine=database_engine
)
return out | 2bafe1f31f19c2b115d54e61c124f06368694b6b | 3,658,255 |
from pathlib import Path
import textwrap
import nemo_nowcast
def config(base_config):
""":py:class:`nemo_nowcast.Config` instance from YAML fragment to use as config for unit tests."""
config_file = Path(base_config.file)
with config_file.open("at") as f:
f.write(
textwrap.dedent(
"""\
file group: allen
vhfr fvcom runs:
host: arbutus.cloud
run types:
nowcast x2:
results: /nemoShare/MEOPAR/SalishSea/fvcom-nowcast-x2/
forecast x2:
results: /nemoShare/MEOPAR/SalishSea/fvcom-forecast-x2/
nowcast r12:
results: /nemoShare/MEOPAR/SalishSea/fvcom-nowcast-r12/
results archive:
nowcast x2: /opp/fvcom/nowcast-x2/
forecast x2: /opp/fvcom/forecast-x2/
nowcast r12: /opp/fvcom/nowcast-r12/
"""
)
)
config_ = nemo_nowcast.Config()
config_.load(config_file)
return config_ | 3ed4253f660a87e8b24392b4eb926b387067010f | 3,658,256 |
def __check_complete_list(list_, nb_max, def_value):
"""
make sure the list is long enough
complete with default value if not
:param list_: list to check
:param nb_max: maximum length of the list
:param def_value: if list too small,
completes it with this value
:return: boolean, False if the list is too long
"""
if len(list_) <= nb_max:
list_.extend([def_value] * (nb_max - len(list_)))
return True
else:
return False | 9d439cd3eeea04e7a3e0e59aa4fe0bbb875bdfe4 | 3,658,257 |
import pickle
def _fill_function(func, globals, defaults, closure, dct):
""" Fills in the rest of function data into the skeleton function object
that were created via _make_skel_func().
"""
func.func_globals.update(globals)
func.func_defaults = defaults
func.func_dict = dct
if len(closure) != len(func.func_closure):
raise pickle.UnpicklingError("closure lengths don't match up")
for i in range(len(closure)):
_change_cell_value(func.func_closure[i], closure[i])
return func | 7ac454b7d6c43f49da1adf32522c03d28d88e6b7 | 3,658,258 |
import os
def symlink_gfid_to_path(brick, gfid):
"""
    Each directory is symlinked to a file named by its GFID
    in the .glusterfs directory of the brick backend. Using readlink
    we get PARGFID/basename of the dir, and readlink recursively
    until PARGFID is ROOT_GFID.
"""
if gfid == ROOT_GFID:
return ""
out_path = ""
while True:
path = os.path.join(brick, ".glusterfs", gfid[0:2], gfid[2:4], gfid)
path_readlink = os.readlink(path)
pgfid = os.path.dirname(path_readlink)
out_path = os.path.join(os.path.basename(path_readlink), out_path)
if pgfid == "../../00/00/%s" % ROOT_GFID:
break
gfid = os.path.basename(pgfid)
return out_path | 5cede0058a3d1fa6697dfc84eac4c1315a9a531f | 3,658,259 |
import PIL.Image
def resize_img(img, size, keep_aspect_ratio=True):
"""resize image using pillow
Args:
img (PIL.Image): pillow image object
size(int or tuple(in, int)): width of image or tuple of (width, height)
keep_aspect_ratio(bool): maintain aspect ratio relative to width
Returns:
(PIL.Image): pillow image
"""
if isinstance(size, int):
size = (size, size)
# get ratio
width, height = img.size
requested_width = size[0]
if keep_aspect_ratio:
ratio = width / requested_width
requested_height = height / ratio
else:
requested_height = size[1]
size = (int(requested_width), int(requested_height))
img = img.resize(size, resample=PIL.Image.LANCZOS)
return img | b3ddc2c929fa530f2af612c3021802f7bd1ed285 | 3,658,260 |
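# Minimal usage sketch for resize_img above (illustrative addition): resize a
# synthetic 400x200 image to a width of 100, keeping the aspect ratio.
demo_img = PIL.Image.new("RGB", (400, 200), color="gray")
print(resize_img(demo_img, 100).size)  # (100, 50)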
from typing import Tuple

import numpy as np
def calculate_clip_from_vd(circuit: DiodeCircuit, v_d: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
:returns: v_in, v_out
"""
Rs = circuit.Rs
Is = circuit.diode.Is
n = circuit.diode.n
Vt = circuit.diode.Vt
Rp = circuit.Rp
Rd = circuit.Rd
Id = Is * (np.exp(v_d / (n * Vt)) - 1.0)
Vd = v_d
if Rp is None:
Vin = Vd + (Rd * Id) + (Rs * Id)
else:
Vin = Vd + (Rd * Id) + (Rs * Id) + (Vd * Rs / Rp) + (Id * Rd * Rs / Rp)
Vout = Vd + Id * Rd
return Vin, Vout | ec60231622c06f1a972af050c0403e8247aa75ed | 3,658,261 |
def tex_coord(x, y, n=8):
""" Return the bounding vertices of the texture square.
"""
m = 1.0 / n
dx = x * m
dy = y * m
return dx, dy, dx + m, dy, dx + m, dy + m, dx, dy + m | 0776dd61aa83b9c9d8afe7574607022c9f7c2b77 | 3,658,262 |
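# Quick check of tex_coord above (added example): texture square (1, 2) on the
# default 8x8 atlas uses 1/8-wide cells, so all corners are multiples of 0.125.
print(tex_coord(1, 2))  # (0.125, 0.25, 0.25, 0.25, 0.25, 0.375, 0.125, 0.375)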
import xarray as xr
from xclim.core.units import convert_units_to


def uniindtemp_compute(da: xr.DataArray, thresh: str = "0.0 degC", freq: str = "YS"):
    """Mean of the difference between `da` and `thresh`, resampled at frequency `freq`."""
out = da - convert_units_to(thresh, da)
out = out.resample(time=freq).mean()
out.attrs["units"] = da.units
return out | 640a50e2a4ff61192f97e31c782be58437d301d0 | 3,658,263 |
def verify_parentheses(parentheses_string: str) -> bool:
"""Takes input string of only '{},[],()' and evaluates to True if valid."""
open_parentheses = []
valid_parentheses_set = {'(', ')', '[', ']', '{', '}'}
parentheses_pairs = {
')' : '(',
']' : '[',
'}' : '{'
}
if len(parentheses_string) % 2 != 0:
return False
for character in parentheses_string:
if character not in valid_parentheses_set:
raise ValueError("Only parentheses may be part of input string.")
if character in {'(', '[', '{'}:
open_parentheses.append(character)
if character in {')', ']', '}'}:
if len(open_parentheses) == 0:
return False
elif open_parentheses[-1] != parentheses_pairs[character]:
return False
del open_parentheses[-1]
if len(open_parentheses) > 0:
return False
return True | 2e2c07314d474b582f12af8cf53a311c0fa323c1 | 3,658,264 |
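# Example calls for verify_parentheses above (illustrative addition):
print(verify_parentheses("({[]})"))  # True  - properly nested
print(verify_parentheses("([)]"))    # False - interleaved pairs
print(verify_parentheses("(("))      # False - nothing is closed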
from time import time


def _stableBaselineTrainingAndExecution(env, typeAgent, numberOptions, mode):
    """Function to train and execute Stable Baselines algorithms (A2C or PPO2)."""
if typeAgent == 2:
model = A2C(MlpPolicy, env, verbose=1)
else:
model = PPO2(MlpPolicy, env, verbose=1)
print("Training model....")
startTime = time()
model.learn(total_timesteps=DEFAULT_TRAINING_RANGE)
trainingTime = time() - startTime
print("Model trained in " + str(trainingTime) + ".")
print("Starting episodes....")
totalSteps, numberEpisodes, studentTotalScore, projectTotalScore, skillsTotalScore = 0, 0, 0., 0., 0.
bestResult = []
bestStudentScore = 0.0
bestStudentAssigned = 0
sumStudentAssigned = 0.0
allStudentsAssigned = []
allProjectAssignations = []
allSteps = []
allResults = []
allAverageStudentScore = []
allAverageProjectScore = []
allAverageSkillsScore = []
allStudentScores = []
allProjectScores = []
progressBar = Bar("-> Execution progress:", max=DEFAULT_EXECUTION_RANGE)
for i in range(DEFAULT_EXECUTION_RANGE):
state = env.reset(1)
steps, reward = 0, 0
done = False
print("Execution " + str(i))
while not done:
action, _state = model.predict(state)
state, reward, done, info = env.step(action)
# env.render()
steps += 1
numberEpisodes += 1
allSteps.append(steps)
averageStudentScore, averageProjectScore, averageSkillsScore, studentScores, projectScores, studentsAssigned, projectAssignations = env.stepScores()
allResults.append(env.finalState())
allAverageStudentScore.append(averageStudentScore)
allAverageProjectScore.append(averageProjectScore)
allAverageSkillsScore.append(averageSkillsScore)
allStudentScores.append(studentScores)
allProjectScores.append(projectScores)
allStudentsAssigned.append(studentsAssigned)
allProjectAssignations.append(projectAssignations)
averageStudentAssigned = sum(studentsAssigned) / numberOptions
sumStudentAssigned += sum(studentsAssigned) / numberOptions
if averageStudentAssigned >= bestStudentAssigned and averageStudentScore > bestStudentScore:
bestStudentAssigned = averageStudentAssigned
bestStudentScore = averageStudentScore
bestResult = env.finalState()
progressBar.next()
progressBar.finish()
print("Execution done.")
print(trainingTime)
if mode == 0:
_executionAnalysis(numberEpisodes, allStudentScores, allProjectScores, allSteps, bestStudentAssigned,
numberOptions, allStudentsAssigned, allProjectAssignations, sumStudentAssigned)
return bestResult | 7fd91b28ab475fb43ea7e6af4ca17302863e269a | 3,658,265 |
from binascii import hexlify


def string_to_hexadecimale_device_name(name: str) -> str:
"""Encode string device name to an appropriate hexadecimal value.
Args:
name: the desired name for encoding.
Return:
Hexadecimal representation of the name argument.
"""
length = len(name)
if 1 < length < 33:
hex_name = hexlify(name.encode())
zeros_pad = ("00" * (32 - length)).encode()
return (hex_name + zeros_pad).decode()
raise ValueError("name length can vary from 2 to 32") | 53d5c5a221a2c3dac46c5fb15d051d78592b109b | 3,658,266 |
def createTeam(
firstIndex, secondIndex, isRed, first = 'DefensiveAgent', second = 'OffensiveAgent'):
"""
This function should return a list of two agents that will form the
team, initialized using firstIndex and secondIndex as their agent
index numbers. isRed is True if the red team is being created, and
will be False if the blue team is being created.
As a potentially helpful development aid, this function can take
additional string-valued keyword arguments ("first" and "second" are
such arguments in the case of this function), which will come from
the --redOpts and --blueOpts command-line arguments to capture.py.
For the nightly contest, however, your team will be created without
any extra arguments, so you should make sure that the default
behavior is what you want for the nightly contest.
"""
return [eval(first)(firstIndex), eval(second)(secondIndex)] | b99e8f548b6e6166517fb35a89f5381ef6d7692b | 3,658,267 |
def str_dice(die):
"""Return a string representation of die.
>>> str_dice(dice(1, 6))
'die takes on values from 1 to 6'
"""
return 'die takes on values from {0} to {1}'.format(smallest(die), largest(die)) | 29eb7f6aa43e068e103016bbc8c35699fbf4a3ea | 3,658,268 |
import pyregion
from astropy import log, wcs
def pyregion_subset(region, data, mywcs):
"""
Return a subset of an image (`data`) given a region.
Parameters
----------
region : `pyregion.parser_helper.Shape`
A Shape from a pyregion-parsed region file
data : np.ndarray
An array with shape described by WCS
mywcs : `astropy.wcs.WCS`
A world coordinate system describing the data
"""
shapelist = pyregion.ShapeList([region])
if shapelist[0].coord_format not in ('physical','image'):
# Requires astropy >0.4...
# pixel_regions = shapelist.as_imagecoord(self.wcs.celestial.to_header())
# convert the regions to image (pixel) coordinates
celhdr = mywcs.sub([wcs.WCSSUB_CELESTIAL]).to_header()
pixel_regions = shapelist.as_imagecoord(celhdr)
else:
# For this to work, we'd need to change the reference pixel after cropping.
# Alternatively, we can just make the full-sized mask... todo....
raise NotImplementedError("Can't use non-celestial coordinates with regions.")
pixel_regions = shapelist
# This is a hack to use mpl to determine the outer bounds of the regions
# (but it's a legit hack - pyregion needs a major internal refactor
# before we can approach this any other way, I think -AG)
mpl_objs = pixel_regions.get_mpl_patches_texts()[0]
# Find the minimal enclosing box containing all of the regions
# (this will speed up the mask creation below)
extent = mpl_objs[0].get_extents()
xlo, ylo = extent.min
xhi, yhi = extent.max
all_extents = [obj.get_extents() for obj in mpl_objs]
for ext in all_extents:
xlo = xlo if xlo < ext.min[0] else ext.min[0]
ylo = ylo if ylo < ext.min[1] else ext.min[1]
xhi = xhi if xhi > ext.max[0] else ext.max[0]
yhi = yhi if yhi > ext.max[1] else ext.max[1]
log.debug("Region boundaries: ")
log.debug("xlo={xlo}, ylo={ylo}, xhi={xhi}, yhi={yhi}".format(xlo=xlo,
ylo=ylo,
xhi=xhi,
yhi=yhi))
subwcs = mywcs[ylo:yhi, xlo:xhi]
subhdr = subwcs.sub([wcs.WCSSUB_CELESTIAL]).to_header()
subdata = data[ylo:yhi, xlo:xhi]
mask = shapelist.get_mask(header=subhdr,
shape=subdata.shape)
log.debug("Shapes: data={0}, subdata={2}, mask={1}".format(data.shape, mask.shape, subdata.shape))
return (xlo,xhi,ylo,yhi),mask | 03578a1957b8cbbae573588f85f24f8f528dc05b | 3,658,269 |
def transformer_encoder(encoder_input,
encoder_self_attention_bias,
hparams,
name="encoder",
nonpadding=None,
save_weights_to=None,
make_image_summary=True):
"""A stack of transformer layers.
Args:
encoder_input: a Tensor
encoder_self_attention_bias: bias Tensor for self-attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
name: a string
nonpadding: optional Tensor with shape [batch_size, encoder_length]
indicating what positions are not padding. This must either be
passed in, which we do for "packed" datasets, or inferred from
encoder_self_attention_bias. The knowledge about padding is used
for pad_remover(efficiency) and to mask out padding in convolutional
layers.
save_weights_to: an optional dictionary to capture attention weights
for visualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
Returns:
    y: a Tensor
"""
x = encoder_input
attention_dropout_broadcast_dims = (
common_layers.comma_separated_string_to_integer_list(
getattr(hparams, "attention_dropout_broadcast_dims", "")))
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_NUM_HIDDEN_LAYERS,
value=hparams.num_encoder_layers or hparams.num_hidden_layers)
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_ATTENTION_DROPOUT,
value=hparams.attention_dropout)
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_ATTENTION_DENSE,
value={
"use_bias": "false",
"num_heads": hparams.num_heads,
"hidden_size": hparams.hidden_size
})
with tf.variable_scope(name):
if nonpadding is not None:
padding = 1.0 - nonpadding
else:
padding = common_attention.attention_bias_to_padding(
encoder_self_attention_bias)
nonpadding = 1.0 - padding
pad_remover = None
if hparams.use_pad_remover and not common_layers.is_xla_compiled():
pad_remover = expert_utils.PadRemover(padding)
for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers):
initial_sparsity = None
if hparams.get("load_masks_from"):
initial_sparsity = hparams.get("initial_sparsity")
with tf.variable_scope("layer_%d" % layer):
with tf.variable_scope("self_attention"):
y = sparse_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams),
None,
encoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
max_relative_position=hparams.max_relative_position,
heads_share_relative_embedding=(
hparams.heads_share_relative_embedding),
add_relative_to_values=hparams.add_relative_to_values,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims,
max_length=hparams.get("max_length"),
vars_3d=hparams.get("attention_variables_3d"),
sparsity_technique=hparams.get("sparsity_technique"),
threshold=hparams.get("log_alpha_threshold"),
training=hparams.get("mode") == tf.estimator.ModeKeys.TRAIN,
clip_alpha=hparams.get("clip_log_alpha"),
initial_sparsity=initial_sparsity,
split_heads=hparams.get("split_heads"))
x = common_layers.layer_postprocess(x, y, hparams)
with tf.variable_scope("ffn"):
y = transformer_ffn_layer(
common_layers.layer_preprocess(x, hparams),
hparams,
pad_remover)
x = common_layers.layer_postprocess(x, y, hparams)
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_NORM,
value={"hidden_size": hparams.hidden_size})
return common_layers.layer_preprocess(x, hparams) | 0f56315a972acc235dba7ec48c8b83b84bd6b3f1 | 3,658,270 |
import os
def update(event, context):
"""
    Place your code to handle Update events here.
    To return a failure to CloudFormation, simply raise an exception; the exception message will be sent to CloudFormation Events.
"""
region = os.environ['AWS_REGION']
prefix_list_name = event['ResourceProperties']['PrefixListName']
physical_resource_id = 'RetrievedPrefixList'
response_data = get_pl_id(prefix_list_name, region)
return physical_resource_id, response_data | cc5762bbd6fb79f6ec4823d706139c2d4abb3a31 | 3,658,271 |
import re
def compute_dict(file_path):
"""Computes the dict for a file whose path is file_path"""
file_dict = {}
with open(file_path, encoding = 'utf8') as fin:
for line in fin:
line = line.strip()
            txt = re.sub(r'([^a-zA-Z0-9\s]+)', r' \1 ', line)
            txt = re.sub(r'([\s]+)', ' ', txt)
words = txt.split(" ")
for word in words:
w = str(word)
if(w not in file_dict):
file_dict[w] = 1
else:
file_dict[w] = file_dict[w] + 1
return file_dict | 821e29181aad781279b27174be0fd7458b60481f | 3,658,272 |
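# Small demonstration of compute_dict above (illustrative addition): write a
# temporary file and count its tokens. Punctuation is split into separate tokens,
# and the trailing space produced by the substitution yields one empty token.
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False, encoding="utf8") as tmp:
    tmp.write("hello, hello world!")
    tmp_path = tmp.name
print(compute_dict(tmp_path))  # {'hello': 2, ',': 1, 'world': 1, '!': 1, '': 1}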
def get_scale(lat1, lon1, var, desired_distance, unit='miles'):
"""
Calculate the difference in either latitude or longitude that is equivalent
to some desired distance at a given point on Earth. For example, at a specific
point, how much does latitude need to change (assuming longitude is constant) to
be equal to 60 miles? This is especially important since lines of longitude are
closer together near Earth's poles. This function is helpful when converting
latitude and longitude coordinates to pixel coordinates in order to plot a point
on the screen.
Parameters:
1 - latitude of position in decimal degrees
2 - longitude of position in decimal degrees
3 - "lat" or "lon" to specify if calulating change for latitude or longitude
4 - the desired distance from the given point
5 - unit of measure (optional), "miles" or "km", default is miles
Returns:
The difference in latitude or longitude
"""
# Create a second point that is initially set to the starting point
# The idea is to that push this point farther and farther away (either by lat or lon)
# until it is the desired distance away
lat2 = lat1
lon2 = lon1
# Create a variable for tracking the actual distance between the two points, which
# can be compared against the desired distance
actual_distance = get_distance(lat1, lon1, lat2, lon2, unit)
n = 1 # Place value to increase or decrease lat/lon by (1, .1, .01, .001, etc.)
decrease_n = False # Flag to indicate if n should be decreased
if var == 'lat':
var_value = lat2 # Variable for holding either latitude or longitude (whichever is being modified)
elif var == 'lon':
var_value = lon2
    else:
        raise ValueError('var not recognized (expected "lat" or "lon"): ' + str(var))
# Keep looping until the difference between the desired distance and the actual distance
# is less than 0.0001 (in whatever units)... basically until it's really close
while abs(round(desired_distance - actual_distance, 4)) > 0.0001:
# Keep increasing the var_value until the actual distance is too great, then start decreasing until it's too small
# If desired distance is greater than actual, add n to the var_value
if desired_distance > actual_distance:
var_value += n
var_value = round(var_value, 6) # Round to 6 decimal places to clean up floating point messiness
decrease_n = True # Indicate it's ok the decrease n if the following else statement is evaluated
# If actual distance is greater than desired, subtract n from var_value
else:
if decrease_n:
n *= 0.1 # Decrease n by a factor of ten
var_value -= n
var_value = round(var_value, 6)
decrease_n = False # Don't decrease n until after the next time the if statement is evaluated
# Recalculate the actual distance
if var == 'lat':
actual_distance = get_distance(lat1, lon1, var_value, lon2, unit)
else:
actual_distance = get_distance(lat1, lon1, lat2, var_value, unit)
# print round(actual_distance, 4) for testing purposes
# Return the difference between lat2 and lat1 (or lon2/lon1) that is equal to the desired distance
if var == 'lat':
return abs(round(var_value - lat1, 6))
else:
return abs(round(var_value - lon1, 6)) | 258d95b1372d1b863f121552abb2ba6047f5aaad | 3,658,273 |
from os import PathLike
from typing import Union
from pathlib import Path
async def connect_unix(path: Union[str, PathLike]) -> UNIXSocketStream:
"""
Connect to the given UNIX socket.
Not available on Windows.
:param path: path to the socket
:return: a socket stream object
"""
path = str(Path(path))
return await get_asynclib().connect_unix(path) | 825d69aa19afd593b355063639cbcd91cb23e9fa | 3,658,274 |
def isMatch(s, p):
""" Perform regular simple expression matching
Given an input string s and a pattern p, run regular expression
matching with support for '.' and '*'.
Parameters
----------
s : str
The string to match.
p : str
The pattern to match.
Returns
-------
bool
Was it a match or not.
"""
dp = [[False] * (len(p) + 1) for _ in range(len(s) + 1)]
dp[0][0] = True
# The only way to match a length zero string
# is to have a pattern of all *'s.
for ii in range(1, len(p)):
if p[ii] == "*" and dp[0][ii-1]:
dp[0][ii + 1] = True
for ii in range(len(s)):
for jj in range(len(p)):
            # Matching a single character c or '.'.
if p[jj] in {s[ii], '.'}:
dp[ii+1][jj+1] = dp[ii][jj]
elif p[jj] == '*':
# Double **, which is equivalent to *
if p[jj-1] not in {s[ii], '.'}:
dp[ii+1][jj+1] = dp[ii+1][jj-1]
# We can match .* or c* multiple times, once, or zero
# times (respective clauses in the or's)
else:
dp[ii+1][jj+1] = dp[ii][jj+1] or dp[ii+1][jj] or dp[ii+1][jj-1]
return dp[-1][-1] | 92cd3171afeb73c6a58bbcd3d3ea6d707401cb09 | 3,658,275 |
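# Example calls for isMatch above (illustrative addition):
print(isMatch("aab", "c*a*b"))               # True  - 'c*' matches zero chars, 'a*' matches 'aa'
print(isMatch("mississippi", "mis*is*p*."))  # False
print(isMatch("ab", ".*"))                   # True  - '.*' matches any string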
import numpy as np


def cepheid_lightcurve_advanced(band, tarr, m0, period, phaseshift, shape1, shape2, shape3, shape4, datatable=None):
"""
Generate a Cepheid light curve. More flexibility allowed.
band: one of "B", "V", "I"
tarr: times at which you want the light curve evaluated
m0: mean magnitude for the light curve
period: same units as tarr
phaseshift: same units as tarr
shape1-4: parameters determining the shape of the light curve.
        These are the first four principal components from Yoachim et al. 2009.
        They should generally be > 0.
datatable: which set of templates to use.
By default, it loads the long period templates.
Long period: >10 days; Short period: <10 days
Can also pass an integer.
Even int -> long period, odd int -> short period.
Note: for speed in fitting, read the table you want and pass it in.
"""
allowed_bands = ["I","V","B"]
assert band.upper() in allowed_bands
if datatable is None:
datatable = load_longperiod_datatable()
elif isinstance(datatable,(int,float)):
datatable = int(datatable)
if (datatable % 2) == 1:
datatable = load_shortperiod_datatable()
else:
datatable = load_longperiod_datatable()
Nt = len(tarr)
tstack = np.ravel([tarr for x in range(3)])
#p0 = m0i, m0v, m0b, period, phase shift, tbreak, tbreak2
p0 = [m0,m0,m0,period,phaseshift,Nt,2*Nt, shape1, shape2, shape3, shape4]
lcs = gen_lc(tstack, p0, datatable)
lc = lcs[allowed_bands.index(band)]
return lc | 7e9a94f4e59f3da31c8c21a6e35822a0ac0d8051 | 3,658,276 |
def app_list(context):
"""
Renders the app list for the admin dashboard widget.
"""
context["dashboard_app_list"] = admin_app_list(context["request"])
return context | 699aa55403169f87c26fc9655d8c6dcb29aa14d2 | 3,658,277 |
def path_depth(path: str, depth: int = 1) -> str:
"""Returns the `path` up to a certain depth.
Note that `depth` can be negative (such as `-x`) and will return all
elements except for the last `x` components
"""
return path_join(path.split(CONFIG_SEPARATOR)[:depth]) | c04899974560b2877db313fa0444203cc483a2b0 | 3,658,278 |
def read_config_file(filename, preserve_order=False):
"""
Read and parse a configuration file.
Parameters
----------
    filename : str
        Path of configuration file
    preserve_order : bool, optional
        Whether to preserve the order of entries in the parsed configuration
Returns
-------
dict
Configuration dictionary
"""
with open(filename) as f:
return parse_config(f, preserve_order) | 6a0aab4ae0da3abdddf080c98ee69eb92d2d8d04 | 3,658,279 |
def languages_list_handler():
"""Get list of supported review languages (language codes from ISO 639-1).
**Example Request:**
.. code-block:: bash
$ curl https://critiquebrainz.org/ws/1/review/languages \\
-X GET
**Example Response:**
.. code-block:: json
{
"languages": [
"aa",
"ab",
"af",
"ak",
"yo",
"za",
"zh",
"zu"
]
}
:resheader Content-Type: *application/json*
"""
return jsonify(languages=supported_languages) | 5b8791ad5d71a94486d96379f62ed9ebf850ec59 | 3,658,280 |
from os import listdir


def corpus_subdirs(path):
    """ Return a list of the entries in path that are not .txt files (i.e., the corpus subdirectories). """
subdirs = []
for x in listdir(path):
if not x.endswith('.txt'):
subdirs.append(x)
return subdirs | 645f198f78795dbc5c14b7cfd400fa1e94dc9244 | 3,658,281 |
def edit_string_for_tags(tags):
"""
Given list of ``Tag`` instances or tag strings, creates a string
representation of the list suitable for editing by the user, such
that submitting the given string representation back without
changing it will give the same list of tags.
Tag names which contain commas will be double quoted.
If any tag name which isn't being quoted contains whitespace, the
resulting string of tag names will be comma-delimited, otherwise
it will be space-delimited.
Ported from Jonathan Buchanan's `django-tagging
<http://django-tagging.googlecode.com/>`_
"""
names = []
for tag in tags:
if hasattr(tag, 'name'):
name = tag.name
elif isinstance(tag, (str, unicode,)):
name = tag
else:
continue
if u',' in name or u' ' in name:
names.append('"%s"' % name)
else:
names.append(name)
return u', '.join(sorted(names)) | a05b6cb12e36304096d076e015077f1ec1cc3432 | 3,658,282 |
from typing import Optional
def convert_postgres_array_as_string_to_list(array_as_string: str) -> Optional[list]:
"""
Postgres arrays are stored in CSVs as strings. Elasticsearch is able to handle lists of items, but needs to
be passed a list instead of a string. In the case of an empty array, return null.
For example, "{this,is,a,postgres,array}" -> ["this", "is", "a", "postgres", "array"].
"""
return array_as_string[1:-1].split(",") if len(array_as_string) > 2 else None | cc64fe8e0cc765624f80abc3900985a443f76792 | 3,658,283 |
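# Example calls for convert_postgres_array_as_string_to_list above (added sketch):
print(convert_postgres_array_as_string_to_list("{this,is,a,postgres,array}"))
# ['this', 'is', 'a', 'postgres', 'array']
print(convert_postgres_array_as_string_to_list("{}"))  # None (empty array)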
from random import choice


def generate_prime_number(min_value=0, max_value=300):
"""Generates a random prime number within the range min_value to max_value
Parameters
----------
min_value : int, optional
The smallest possible prime number you want, by default 0
max_value : int, optional
        The upper bound (exclusive) on the prime number you want, by default 300
Returns
-------
int
A randomly selected prime number in the range min_value to max_value
"""
# Create a list of prime values within the range
primes = [number for number in range(min_value,max_value) if is_prime(number)]
return choice(primes) | 539f74fcdba2c366b0fe13b0bc0fab6727300ec1 | 3,658,284 |
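# Usage sketch for generate_prime_number above (illustrative addition). The helper
# is_prime is assumed to be defined elsewhere in the original module; a minimal
# stand-in is provided here only so the demo runs.
def is_prime(n):
    return n > 1 and all(n % d for d in range(2, int(n ** 0.5) + 1))

print(generate_prime_number(10, 50))  # e.g. 23 (a random prime in [10, 50))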
def sort_extended_practitioner(practitioner):
"""
    Sort on the latest date, then alphabetically on the other practitioners,
    keeping only unique entries.
:param practitioner:
:return: practitioner
"""
uniques = []
for p in practitioner:
if find_uniques(p, uniques):
uniques.append(p)
return uniques | 476d9adf9d93b88f20166b1e95715aaa54bd67f9 | 3,658,285 |
def lti13_login_params_dict(lti13_login_params):
"""
Return the initial LTI 1.3 authorization request as a dict
"""
utils = LTIUtils()
args = utils.convert_request_to_dict(lti13_login_params)
return args | 026e65c132666816f774a05f6977dac9ab194b77 | 3,658,286 |
from math import log


def calcShannonEnt(dataset):
    """
    Compute the Shannon entropy of a dataset.
    Input: dataset (the last element of each row is the class label)
    Output: entropy
    """
numEntris = len(dataset)
labelCounts = {}
for featVec in dataset:
        currentLabel = featVec[-1]  # the last element of each row, i.e., the decision result (label)
if currentLabel not in labelCounts.keys():
labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1  # labelCounts records the label classes (keys) and the count of each class (values)
shannonEnt = 0
for key in labelCounts:
prob = float(labelCounts[key])/numEntris
shannonEnt -= prob*log(prob,2)
return shannonEnt | 9b64d0ad0bb517deb77c24f3c08e004d255daa68 | 3,658,287 |
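# Worked example for calcShannonEnt above (illustrative addition): a dataset with
# two 'yes' rows and three 'no' rows has entropy -0.4*log2(0.4) - 0.6*log2(0.6) ~ 0.971.
fish_data = [
    [1, 1, 'yes'],
    [1, 1, 'yes'],
    [1, 0, 'no'],
    [0, 1, 'no'],
    [0, 1, 'no'],
]
print(calcShannonEnt(fish_data))  # ~0.9709505944546686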
import array
def get_boundaries_medians(things, lowers=[], uppers=[]):
"""Return the boundaries and medians of given percentage ranges.
Parameters:
1. things: a list of numbers
2. lowers: lower percentage limits
3. uppers: upper percentage limits
Returns:
lower, median, upper
"""
# if neither list nor array nor tuple, just return None
if type(things)!=list and type(things)!=array and type(things)!=tuple: return [], [], []
n_things = len(things)
if n_things == 0: return [], [], []
sthings = sorted(list(things))
l = map(lambda x: int(round(1.0*x*n_things/100))-1, lowers)
r = map(lambda x: int(round(1.0*x*n_things/100)), uppers)
return map(lambda x: sthings[x], l), map(lambda x, y: median(sthings[max(0, x):min(n_things, y+1)]), l, r), map(lambda y: sthings[y], r) | d83365ad2d9598dc19c279a6b20424746f53d6ce | 3,658,288 |
import numpy as np
import pyabf


def getMeanBySweep(abf, markerTime1, markerTime2):
"""
Return the mean value between the markers for every sweep.
"""
assert isinstance(abf, pyabf.ABF)
pointsPerSecond = abf.dataRate
    sweepIndex1 = int(pointsPerSecond * markerTime1)
    sweepIndex2 = int(pointsPerSecond * markerTime2)
means = []
for i in range(abf.sweepCount):
abf.setSweep(i)
segment = abf.sweepY[sweepIndex1:sweepIndex2]
segmentMean = np.mean(segment)
means.append(segmentMean)
return means | 8051d67c832c9b331798f896b8f98c2673770a94 | 3,658,289 |
import json
def handler(event, context):
"""
Params:
-------
event (dict):
        context (dict):
Both params are standard lambda handler invocation params but not used within this
lambda's code.
Returns:
-------
(string): JSON-encoded dict with top level keys for each of the possible
queries that can be run against the `/datasets` endpoint (key: _all_ contains
result of the LIST operation, each of other keys contain the result of
GET /datasets/{spotlight_id | "global"})
"""
# TODO: defined TypedDicts for these!
datasets = _gather_json_data(DATASETS_JSON_FILEPATH)
sites = _gather_json_data(SITES_JSON_FILEPATH)
result = json.dumps(_gather_datasets_metadata(datasets, sites))
print(
f"Saving generated metadata to {DATASET_METADATA_FILENAME} in bucket {metadata_host_bucket.name}"
)
metadata_host_bucket.put_object(
Body=result, Key=DATASET_METADATA_FILENAME, ContentType="application/json",
)
return result | 21d49c8bdf174633ec33f64dd77d16485e694f1d | 3,658,290 |
def add_prefix(key):
"""Dummy key_function for testing index code."""
return "id_" + key | 96dda0bd57b4eb89f17c8bb69ad48e3e1675a470 | 3,658,291 |
def schedule_conv2d_nhwc_tensorcore(cfg, outs):
"""TOPI schedule callback"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "conv2d_nhwc_tensorcore" in op.tag:
schedule_nhwc_tensorcore_cuda(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s | 318be211f02469c1e971a9303f48f92f88af5755 | 3,658,292 |
from tqdm import tqdm
def simulate(config, show_progress=False):
"""Simulate incarceration contagion dynamics.
Parameters
----------
config : Config
Config object specifying simulation parameters.
Returns
-------
dict
Dictionary specifying simulated population of agents.
"""
popu = initialize(config)
agents = popu.values()
def display(range_obj):
if show_progress:
range_obj = tqdm(range_obj)
return range_obj
# these are in years. need to work in terms of months
for itr in display(range(config.start_iter, config.end_iter)):
for month in range(12):
# infection step
for person in agents:
# random infection, not due to contagion
if valid_age(person, itr, config.min_age):
person["infected"] += infect("*", "*", "*")
# infect connected people
if person["incarcerated"] > 0:
person["incarcerated"] -= 1
person["months_in_prison"] += 1
spread_infection(popu, person, itr, month, config)
# sentencing step
for person in agents:
if person["infected"] and not person["incarcerated"]:
assign_sentence(person, itr, month, config)
person["infected"] = 0
return popu | b5b813c1038d6b473ff3f4fa6beccdad2615f2af | 3,658,293 |
import cel_import
def get_event_log(file_path: str = None, use_celonis=False):
"""
Gets the event log data structure from the event log file.
    Dispatches the method to be used based on the file type
:param use_celonis: If the attribute is set to true the event log will be retrieved from celonis
:param file_path: Path to the event-log file
:return:EventLog data structure
"""
if file_path is None and not use_celonis:
raise ValueError("Parameters file_path was None and use_celonis was false at the same time."
"This behavior is not supported")
if use_celonis:
return cel_import.get_event_log_from_celonis()
else:
file_path_lowercase = file_path.lower()
if file_path_lowercase.endswith(".xes"):
return __handle_xes_file(file_path)
else:
raise ValueError('The input file was not a XES file') | 9b77a2bed9d6551cc2d0e4eb607de0d81b95c6f3 | 3,658,294 |
import numpy as np


def find_peak(corr, method='gaussian'):
"""Peak detection algorithm switch
After loading the correlation window an maximum finder is invoked.
The correlation window is cut down to the necessary 9 points around the maximum.
Afterwards the maximum is checked not to be close to the boarder of the correlation frame.
This cropped window is used in along with the chosen method to interpolate the sub pixel shift.
Each interpolation method returns a tuple with the sub pixel shift in x and y direction.
The maximums position and the sub pixel shift are added and returned.
If an error occurred during the sub pixel interpolation the shift is set to nan.
Also if the interpolation method is unknown an exception in thrown.
:param corr: correlation window
:param method: peak finder algorithm (gaussian, centroid, parabolic, 9point)
:raises: Sub pixel interpolation method not found
:returns: shift in interrogation window
"""
i, j = np.unravel_index(corr.argmax(), corr.shape)
if check_peak_position(corr, i, j) is False:
return np.nan, np.nan
window = corr[i-1:i+2, j-1:j+2]
if method == 'gaussian':
subpixel_interpolation = gaussian
elif method == 'centroid':
subpixel_interpolation = centroid
elif method == 'parabolic':
subpixel_interpolation = parabolic
elif method == '9point':
subpixel_interpolation = gaussian2D
else:
raise Exception('Sub pixel interpolation method not found!')
try:
dx, dy = subpixel_interpolation(window)
except:
return np.nan, np.nan
else:
return (i + dx, j + dy) | 685eb87cefc6cd2566a9e9d40f827a1fea010b73 | 3,658,295 |
import os

import matplotlib.pyplot as plt
import numpy as np
def mesh_plot(
mesh: PyEITMesh,
el_pos,
mstr="",
figsize=(9, 6),
alpha=0.5,
offset_ratio=0.075,
show_image=False,
show_mesh=False,
show_electrode=True,
show_number=False,
show_text=True,
):
"""plot mesh structure (base layout)"""
# load mesh structure
pts = mesh.node
tri = mesh.element
fig, ax = plt.subplots(figsize=figsize)
ax.set_facecolor("black")
ax.set_aspect("equal")
# load background
if show_image and os.path.exists(mstr):
image_name = mstr.replace("mes", "bmp")
im = plt.imread(image_name)
ax.imshow(im)
else:
# without imshow, the yaxis should be inverted
ax.invert_yaxis()
# show mesh structure
if show_mesh:
ax.triplot(pts[:, 0], pts[:, 1], tri, alpha=alpha)
# show electrodes markers
if show_electrode:
ax.plot(pts[el_pos, 0], pts[el_pos, 1], "yo")
# annotate electrodes numbering
if show_number:
for i, e in enumerate(el_pos):
            ax.text(pts[e, 0], pts[e, 1], str(i + 1), color="r", size=12)
# annotate (L) at offset_ratio*d beside node 0
if show_text:
xb, xa = pts[el_pos[8], 0], pts[el_pos[0], 0]
d = np.abs(xa - xb)
offset = d * offset_ratio
x, y = xa + offset, pts[el_pos[0], 1]
ax.text(x, y, "L", size=20, color="w")
# enlarge the right of axes if using annotation
ax.set_xlim([xb - offset, xa + 2 * offset])
# clean up axis
ax.grid("off")
plt.setp(ax.get_xticklines(), visible=False)
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklines(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
return fig, ax | ba82a9ceebd6b8470b3fb441204cd0d8155e5cdb | 3,658,296 |
def binaryToString(binary):
"""
    Convert a binary (bit) string to a UTF-8 string.
"""
index = 0
string = []
rec = lambda x, i: x[2:8] + (rec(x[8:], i - 1) if i > 1 else '') if x else ''
fun = lambda x, i: x[i + 1:8] + rec(x[8:], i - 1)
while index + 1 < len(binary):
        chartype = binary[index:].index('0')  # number of bytes the character occupies; a single-byte character is stored as 0
length = chartype * 8 if chartype else 8
string.append(chr(int(fun(binary[index:index + length], chartype), 2)))
index += length
return ''.join(string) | 2044109d573abe7c9428b64b289b5aa82ec4d624 | 3,658,297 |
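# Example call for binaryToString above (added sketch): each character is encoded
# on UTF-8 byte boundaries, so 'Hi' is two 8-bit groups.
print(binaryToString('0100100001101001'))  # 'Hi'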
import functools
import logging
def disable_log_warning(fun):
"""Temporarily set FTP server's logging level to ERROR."""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
logger = logging.getLogger('pyftpdlib')
level = logger.getEffectiveLevel()
logger.setLevel(logging.ERROR)
try:
return fun(self, *args, **kwargs)
finally:
logger.setLevel(level)
return wrapper | 6990a2a1a60ea5a24e4d3ac5c5e7fbf443825e48 | 3,658,298 |
def create_csv_step_logger(save_dir: pyrado.PathLike, file_name: str = "progress.csv") -> StepLogger:
"""
Create a step-based logger which only safes to a csv-file.
:param save_dir: parent directory to save the results in (usually the algorithm's `save_dir`)
:param file_name: name of the cvs-file (with ending)
:return: step-based logger
"""
logger = StepLogger()
logfile = osp.join(save_dir, file_name)
logger.printers.append(CSVPrinter(logfile))
return logger | 355c205cf5f42b797b84005c1485c2b6cae74c2e | 3,658,299 |