content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
---|---|---|
def get_at_content(sequence):
"""Return content of AT in sequence, as float between 0 and 1, inclusive. """
sequence = sequence.upper()
a_content = sequence.count('A')
t_content = sequence.count('T')
return round((a_content+t_content)/len(sequence), 2)
|
6316d29cdb9d7129f225f2f79a50485fb6919e32
| 18,300 |
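A minimal usage sketch of get_at_content above; the DNA string is made up for illustration.

seq = "ATGCATGCAA"  # hypothetical sequence
print(get_at_content(seq))  # 4 A's + 2 T's over length 10 -> 0.6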
def page_not_found(e):
"""Handle nonexistin pages."""
_next = get_next_url()
if _next:
flash("Page Not Found", "danger")
return redirect(_next)
return render_template("404.html"), 404
|
9267c65fb842309cf1e877239be4d7fff7b1b634
| 18,301 |
def test_query(p: int = 1) -> int:
"""
Example 2 for a unit test
:param p: example of description
:return: return data
"""
return p
|
69c01914db1d7a5cebf1c4e78f7c5dc7f778b4b4
| 18,302 |
import numpy as np
def mirror_1d(d, xmin=None, xmax=None):
"""If necessary apply reflecting boundary conditions."""
if xmin is not None and xmax is not None:
xmed = (xmin+xmax)/2
return np.concatenate((2*xmin-d[d < xmed], d, 2*xmax-d[d >= xmed]))
elif xmin is not None:
return np.concatenate((2*xmin-d, d))
elif xmax is not None:
return np.concatenate((d, 2*xmax-d))
else:
return d
|
0b538222dd227171ac4a3cf1ac2d8a30361eccf1
| 18,303 |
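A small sketch of how mirror_1d's reflection is typically used (e.g., to reduce boundary bias before a KDE); the sample data are an assumption.

import numpy as np

d = np.array([0.1, 0.2, 0.7, 0.9])
mirrored = mirror_1d(d, xmin=0.0, xmax=1.0)
# Points below the midpoint are reflected about xmin, the rest about xmax:
# [-0.1, -0.2, 0.1, 0.2, 0.7, 0.9, 1.3, 1.1]
print(mirrored)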
def calc_vertical_avg(fld,msk):
"""Compute vertical average, ignoring continental or iceshelf points """
# Make mask of nans, assume input msk is 3D of same size as fld 3 spatial dims
nanmsk = np.where(msk==1,1,np.NAN)
v_avg = fld.copy()
v_avg.values = v_avg.values*msk.values
if 'Z' in fld.dims:
vdim = 'Z'
elif 'Zl' in fld.dims:
vdim = 'Zl'
else:
raise TypeError('Could not find recognizable vertical field in input dataset')
# Once vertical coordinate is found, compute avg along dimension
v_avg = v_avg.sum(dim=vdim,skipna=True)
return v_avg
|
7cd512cf2642864e9974e18ef582f541f65b3e96
| 18,304 |
import logging
import sys
def get_logger(log_file, log_level="info"):
"""
create logger and output to file and stdout
"""
assert log_level in ["info", "debug"]
log_formatter = LogFormatter()
logger = logging.getLogger()
log_level = {"info": logging.INFO, "debug": logging.DEBUG}[log_level]
logger.setLevel(log_level)
stream = logging.StreamHandler(sys.stdout)
stream.setFormatter(log_formatter)
logger.addHandler(stream)
filep = logging.FileHandler(log_file, mode="a")
filep.setFormatter(log_formatter)
logger.addHandler(filep)
return logger
|
dc5b7398738f19a6045d874bde039deae17c0602
| 18,305 |
def replace(data, match, repl):
"""Replace values for all key in match on repl value.
Recursively apply a function to values in a dict or list until the input
data is neither a dict nor a list.
"""
if isinstance(data, dict):
return {
key: repl if key in match else replace(value, match, repl)
for key, value in data.items()
}
if isinstance(data, list):
return [replace(item, match, repl) for item in data]
return data
|
1b3dc8ac7521ec199cf74ebc8f4d8777827ab9fc
| 18,306 |
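A short usage sketch for replace(); the payload and the keys to mask are made up.

payload = {"user": "alice", "password": "s3cret", "items": [{"token": "abc", "id": 1}]}
masked = replace(payload, match={"password", "token"}, repl="***")
# {'user': 'alice', 'password': '***', 'items': [{'token': '***', 'id': 1}]}
print(masked)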
import time
def get_current_date() -> str:
"""Forms a string to represent the current date using the time module"""
if len(str(time.gmtime()[2])) == 1:
current_date = str(time.gmtime()[0]) + '-' + str(time.gmtime()[1]) + '-0' + str(time.gmtime()[2])
else:
current_date = str(time.gmtime()[0]) + '-' + str(time.gmtime()[1]) + '-' + str(time.gmtime()[2])
return current_date
|
480d44fc0153407960eacb875474fc02cb17c6c3
| 18,307 |
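The zero-padding in get_current_date only covers single-digit days, not months. If a fully zero-padded YYYY-MM-DD string is the intent, time.strftime handles both in one call; the function below is a sketch of an alternative, not the original author's approach.

import time

def get_current_date_strftime() -> str:
    """Hypothetical alternative: zero-padded UTC date via strftime."""
    return time.strftime("%Y-%m-%d", time.gmtime())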
def to_jsobj(obj):
"""Convert a Jsonable object to a JSON object, and return it."""
if isinstance(obj, LIST_TYPES):
return [to_jsobj(o) for o in obj]
if obj.__class__.__module__ == "builtins":
return obj
return obj.to_jsobj()
|
ffd43b2d49f6dd0d3b6608a601e3bccc1be1a289
| 18,308 |
def EVAL_find_counter_exemplars(latent_representation_original, Z, idxs, counter_exemplar_idxs):
"""
Compute the values of the goal function.
"""
# prepare the data to apply the diversity optimization
data = np.zeros((len(idxs), np.shape(Z)[1]))
for i in range(len(idxs)):
data[i] = Z[idxs[i]]
# min-max normalization (applied on ALL examples)
scaler = MinMaxScaler()
scaler.fit_transform(data)
# list of points
points = [row for row in scaler.transform(data)]
# MIN MAX normalize instance to explain
instance = scaler.transform((latent_representation_original))
# number of nearest neighbors to consider
knn = 5
kp = {}
lconst = 1
_, d0 = argmax(points, lambda p: -dist(instance, p))
lconst = 0.5 / (-d0)
for p1 in points:
# compute distances
dst = [(p2, dist(p1, p2)) for p2 in points if not np.array_equal(p1, p2)]
# sort
dst = sorted(dst, key=lambda x: x[1])
# add top knn to kp
kp[p1.tobytes()] = set(p2.tobytes() for p2, d in dst[:knn])
# goal function
def g(points):
dpoints, dx = set(), 0
for p1 in points:
# union operator
dpoints |= kp[p1.tobytes()]
dx += dist(p1, instance)
# scaled version 2*cost
return len(dpoints) - 2 * lconst * dx
# get the extracted CF
extracted_CF_data = []
for i in range(len(counter_exemplar_idxs)):
extracted_CF_data.append(Z[counter_exemplar_idxs[i]])
# apply scaling
extracted_CF_data = scaler.transform((extracted_CF_data))
return g(extracted_CF_data)
|
985d260c74e78ffa6a47a52d6d5d30043b0b5495
| 18,309 |
from re import M
def min_energy(bond):
"""Calculate minimum energy.
Args:
bond: an instance of Bond or array[L1*L2][3].
"""
N_unit = L1*L2
coupling = bond.bond if isinstance(bond, Bond) else bond
# Create matrix A
a = np.zeros((N_unit, N_unit), dtype=float)
for i in range(N_unit):
a[i][nn_1(i)] += coupling[i][0]
a[i][nn_2(i)] += coupling[i][1]
a[i][i] += coupling[i][2]
u,s,vt = sl.svd(a)
det_u = sl.det(u)
det_vt = sl.det(vt)
# calculate parity of the projection operator
## product of u_{ij}
sgn = np.prod(np.sign(coupling))
## from boundary condition
if (L1+L2+M*(L1-M))%2 != 0: sgn *= -1 # (-1)^theta
## det(Q) = det(VU)
sgn *= det_u*det_vt
min_epsilon = min(s)
sum_epsilon = -0.5*sum(s)
ene_phys = sum_epsilon
ene_unphys = sum_epsilon + min_epsilon
    # judge whether the vacuum state is physical or not
if sgn < 0: # The vacuum state is unphysical.
ene_phys, ene_unphys = ene_unphys, ene_phys
return ene_phys,ene_unphys,min_epsilon,sgn,det_u,det_vt
|
e86adbde2cbb5135962360fd67c45704c935c123
| 18,310 |
from typing import Tuple
def find_result_node(flat_graph: dict) -> Tuple[str, dict]:
"""
Find result node in flat graph
:return: tuple with node id (str) and node dictionary of the result node.
"""
result_nodes = [(key, node) for (key, node) in flat_graph.items() if node.get("result")]
if len(result_nodes) == 1:
return result_nodes[0]
elif len(result_nodes) == 0:
raise ProcessGraphVisitException("Found no result node in flat process graph")
else:
keys = [k for (k, n) in result_nodes]
raise ProcessGraphVisitException(
"Found multiple result nodes in flat process graph: {keys!r}".format(keys=keys))
|
d0aa0e7ba71c4eb9412393a3bea40965db1525fe
| 18,311 |
import string
import random
def password_generator(size=25, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits):
"""Returns a random 25 character password"""
return ''.join(random.choice(chars) for _ in range(size))
|
66cedb858d7ddef9b93b4f9ed8ffd854a722c14b
| 18,312 |
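A usage sketch for password_generator; for security-sensitive passwords the standard-library secrets module is generally preferred over random, shown here as a hypothetical variant rather than a claim about the original code's requirements.

import secrets
import string

print(password_generator())         # 25-character password from the default alphabet
print(password_generator(size=12))  # shorter password

def password_generator_secure(size=25, chars=string.ascii_letters + string.digits):
    """Hypothetical variant using a cryptographically secure RNG."""
    return ''.join(secrets.choice(chars) for _ in range(size))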
def create_cleaned_df(df, class_label_str):
"""Transform the wide-from Dataframe (df) from main.xlsx into one with
unique row names, values 0-1001 as the column names and a label column
containing the class label as an int.
Parameters
----------
df : pandas DataFrame
A DataFrame read in from main.xlsx. It must have columns 'Name',
'Analyte' and 'Concentration'.
class_label_str: str (len 2)
The class label for the dataframe. It must be two characters long and
one of 'Cu', 'Cd', 'Pb' or 'Sw'.
Returns
-------
pandas DataFrame
Wide-form dataframe with unique row and column names and a label column.
"""
# Replace spaces with underscores in Concentration column
df['Concentration'] = df['Concentration'].str.replace(' ', '_')
# Create new column (we will use this to extract unique names later on)
df['metal_concentration'] = df['Analyte'] + '_' + df['Concentration']
df = df.drop(columns=['Name', 'Analyte', 'Concentration'])
# Transpose df (now columns are a range - 0, 1, 2, etc.)
df['metal_concentration'] = [f'{name}_{i}' for i, name in enumerate(df['metal_concentration'])]
df = df.set_index('metal_concentration')
df.index.name = None
df.columns = range(0, 1002)
class_label_to_int_mapping = get_class_label_to_int_mapping()
df['label'] = class_label_to_int_mapping[class_label_str]
return df
|
ac40ce5af2221db6e984537dfd3a8dcb5f25a4a7
| 18,313 |
import numpy as np
def uni_to_int(dxu, x, lambda_val):
"""
Translates from single integrator to unicycle dynamics.
Parameters
----------
dxu :
Single integrator control input.
x :
Unicycle states (3 x N)
lambda_val :
Returns
-------
dx :
"""
n = dxu.shape[1]
dx = np.zeros((2, n))
for i in range(0, n):
temp = np.array([[np.cos(x[2, i]), -lambda_val * np.sin(x[2, i])],
[np.sin(x[2, i]), lambda_val * np.cos(x[2, i])]])
dx[:, i] = np.dot(temp, dxu[:, i])
return dx
|
bfd9f0f0e62fbedb9611f762267644ecb2b3de30
| 18,314 |
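A minimal sketch of calling uni_to_int; the sample control inputs and unicycle states are assumptions chosen only to exercise the shapes.

import numpy as np

dxu = np.array([[0.5, 0.0],     # 2 x N single-integrator control input
                [0.0, 0.3]])
x = np.array([[0.0, 1.0],       # x positions
              [0.0, 1.0],       # y positions
              [0.0, np.pi/2]])  # headings
dx = uni_to_int(dxu, x, lambda_val=0.05)
print(dx.shape)  # (2, 2)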
import struct
def pack_binary_command(cmd_type, cmd_args, is_response=False):
"""Packs the given command using the parameter ordering specified in GEARMAN_PARAMS_FOR_COMMAND.
*NOTE* Expects that all arguments in cmd_args are already str's.
"""
expected_cmd_params = GEARMAN_PARAMS_FOR_COMMAND.get(cmd_type, None)
if expected_cmd_params is None or cmd_type == GEARMAN_COMMAND_TEXT_COMMAND:
raise ProtocolError('Received unknown binary command: %s' % get_command_name(cmd_type))
expected_parameter_set = set(expected_cmd_params)
received_parameter_set = set(cmd_args.keys())
if expected_parameter_set != received_parameter_set:
raise ProtocolError('Received arguments did not match expected arguments: %r != %r' % (expected_parameter_set, received_parameter_set))
# Select the right expected magic
if is_response:
magic = MAGIC_RES_STRING
else:
magic = MAGIC_REQ_STRING
# !NOTE! str should be replaced with bytes in Python 3.x
# We will iterate in ORDER and str all our command arguments
if compat.any(type(param_value) != str for param_value in cmd_args.itervalues()):
raise ProtocolError('Received non-binary arguments: %r' % cmd_args)
data_items = [cmd_args[param] for param in expected_cmd_params]
# Now check that all but the last argument are free of \0 as per the protocol spec.
if compat.any('\0' in argument for argument in data_items[:-1]):
raise ProtocolError('Received arguments with NULL byte in non-final argument')
binary_payload = NULL_CHAR.join(data_items)
# Pack the header in the !4sII format then append the binary payload
payload_size = len(binary_payload)
packing_format = '!4sII%ds' % payload_size
return struct.pack(packing_format, magic, cmd_type, payload_size, binary_payload)
|
bbe87233d338344ef2ed21b9555fcd4c22c959dc
| 18,315 |
import cupy as cp
import cupyx.scipy.sparse.linalg as cp_linalg
def get_adjacency_spectrum(graph, k=np.inf, eigvals_only=False, which='LA', use_gpu=False):
"""
Gets the top k eigenpairs of the adjacency matrix
:param graph: undirected NetworkX graph
:param k: number of top k eigenpairs to obtain
:param eigvals_only: get only the eigenvalues i.e., no eigenvectors
:param which: the type of k eigenvectors and eigenvalues to find
:return: the eigenpair information
"""
# get all eigenpairs for small graphs
if len(graph) < 100:
A = nx.adjacency_matrix(graph).todense()
eigpairs = eigh(A, eigvals_only=eigvals_only)
else:
A = nx.to_scipy_sparse_matrix(graph, format='csr', dtype=np.float, nodelist=graph.nodes)
if gpu_available() and use_gpu:
A_gpu = cp.sparse.csr_matrix(A)
eigpairs = cp_linalg.eigsh(A_gpu, k=min(k, len(graph) - 3), which=which, return_eigenvectors=not eigvals_only)
if type(eigpairs) is tuple:
eigpairs = list(eigpairs)
eigpairs[0], eigpairs[1] = cp.asnumpy(eigpairs[0]), cp.asnumpy(eigpairs[1])
else:
eigpairs = cp.asnumpy(eigpairs)
else:
if use_gpu: print('Warning: GPU requested, but not available')
eigpairs = eigsh(A, k=min(k, len(graph) - 1), which=which, return_eigenvectors=not eigvals_only)
return eigpairs
|
aef39f929c949edb13604c0e83b01b4d6025f06d
| 18,316 |
def make_system(*args, **kwargs):
"""
Factory function for contact systems. Checks the compatibility between the
substrate, interaction method and surface and returns an object of the
appropriate type to handle it. The returned object is always of a subtype
of SystemBase.
Parameters:
-----------
substrate -- An instance of HalfSpace. Defines the solid mechanics in
the substrate
surface -- An instance of SurfaceTopography, defines the profile.
Returns
-------
"""
substrate, surface = _make_system_args(*args, **kwargs)
return NonSmoothContactSystem(substrate=substrate, surface=surface)
|
06d0fbd8eb8ec6e39ec6aabb2192ab8f3455846e
| 18,317 |
def _batch_normalization(x, norm, scale=None, norm_epsilon=1e-16, name=None):
"""
    Normalizes a tensor by 'norm', and optionally applies a 'scale' \\(\gamma\\) to it, as well as
    an 'offset' \\(\beta\\):
    \\(\gamma \frac{x}{norm} + \beta\\)
    'norm' and 'scale' are both expected to be of a compatible shape:
    * they can have the same number of dimensions as the input 'x', with identical sizes as 'x' for
    dimensions that are not normalized over, and dimension 1 for the dimensions that are being
    normalized over
:param x: 'Tensor'
:param norm:
:param scale:
:param norm_epsilon:
:param name:
:return:
the normalized, scaled, offset tensor
"""
with tf.name_scope(name, "batchnorm", [x, norm, scale]):
inv = tf.rsqrt(tf.square(norm) + norm_epsilon)
if scale is not None:
inv *= scale
# def _debug_print_func(f):
# print("inv={}".format(f))
# return False
# debug_print_op = tf.py_func(_debug_print_func,[inv],[tf.bool])
# with tf.control_dependencies(debug_print_op):
# inv = tf.identity(inv, name="scale")
x *= inv
# def _debug_print_func(f):
# print("x_bn[0]={}".format(f[0, :]))
# # print("x_bn: mean,max,min={},{},{}".format(f.mean(),f.max(),f.min()))
# return False
#
# debug_print_op = tf.py_func(_debug_print_func, [x], [tf.bool])
# with tf.control_dependencies(debug_print_op):
# x = tf.identity(x, name="x_norm")
return x
|
41d2f3be7d42fca4b505318e84afdce63aef2887
| 18,318 |
def place_owner_list(user_id):
"""
It retrieves the list of places for which the user is the owner.
Parameters:
- user_id: id of the user, which is owner and wants to get its own places.
Returns a tuple:
- list of Places owned by the user (empty if the user is not an owner)
- status message
- the http code indicating the type of error, if any
"""
try:
places = Place.get_owner_places(user_id)
except TypeError, e:
return None, str(e), 400
return places, "OK", 200
|
ea2c0df7f4e72bd0b7ace5ca9c341a71fd651b32
| 18,319 |
def job_met_heu(prob_label, tr, te, r, ni, n):
"""MeanEmbeddingTest with test_locs randomized.
tr unused."""
# MeanEmbeddingTest random locations
with util.ContextTimer() as t:
met_heu = tst.MeanEmbeddingTest.create_fit_gauss_heuristic(te, J, alpha, seed=180)
met_heu_test = met_heu.perform_test(te)
return {
#'test_method': met_heu,
'test_result': met_heu_test,
'time_secs': t.secs}
|
6aabf3c2628ecdb98b1ae939040a823be307c6f5
| 18,320 |
def download():
"""Unchanged from web2py.
```
allows downloading of uploaded files
http://..../[app]/default/download/[filename]
```
"""
return response.download(request, db)
|
65e0e27b96b6701f2e04c98be702819908647956
| 18,321 |
import argparse
def setupCLI():
"""
    Handles the command line arguments for running the code, making optional arguments easier and
    cleaner to handle
Returns
-------
List of formatted Command Line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('databaseName', type=str)
parser.add_argument('collectionName', type=str)
parser.add_argument('filterwords', type=str)
parser.add_argument("--server", type=str, help="l=local (default), v=VirtualMachine-3")
args = parser.parse_args()
dbName = args.databaseName
collectionName = args.collectionName
filterWordArg = args.filterwords
if args.server:
if str.lower(args.server) == "l":
server = "local"
else:
server = "vm3"
else:
server = "local"
collection = connectToMongo(dbName, collectionName, server)
if " OR " not in filterWordArg:
filterList = [filterWordArg]
filterText = filterWordArg
else:
filterList = filterWordArg.split(" OR ")
filterText = filterWordArg.replace(" OR ", "|")
argList = [collection, server, filterList, filterText]
return argList
|
1bc20d2f6f8edd251d37f82f7d56b80a946d0519
| 18,322 |
import os
def named_cache(path):
"""
Return dictionary of cache with `(package name, package version)` mapped to cache entry.
This is a simple convenience wrapper around :py:func:`packages`.
"""
return {os.path.split(x.path)[1]: x for x in packages(path)}
|
2d860582ba5616df7e7d1d20c2f756859a4c62f7
| 18,323 |
def action_id2arr(ids):
""" Converts action from id to array format (as understood by the environment) """
return actions[ids]
|
4d3f54078e99c73e0509b36ee39806c721690551
| 18,324 |
import argparse
import sys
def get_args():
"""Get command-line arguments."""
parser = argparse.ArgumentParser(
description='Emulate the tac program: print file(s) last line first.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('file',
type=argparse.FileType('rt'),
nargs='*',
help='Input file(s)',
metavar='FILE',
default=[sys.stdin])
return parser.parse_args()
|
e85c1ce545181f579270279e0225e628816ea888
| 18,325 |
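A sketch of how such a parser is typically consumed to emulate tac; the main() driver below is an assumption, not part of the original program.

def main():
    """Hypothetical driver: print each file's lines in reverse order."""
    args = get_args()
    for fh in args.file:
        for line in reversed(fh.readlines()):
            print(line, end='')

if __name__ == '__main__':
    main()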
from numpy import arccos, pi, dot
from numpy.linalg import norm
def cif_from_ase(ase, full_occupancies=False, add_fake_biso=False):
"""
Construct a CIF datablock from the ASE structure. The code is taken
from
https://wiki.fysik.dtu.dk/ase/epydoc/ase.io.cif-pysrc.html#write_cif,
as the original ASE code contains a bug in printing the
Hermann-Mauguin symmetry space group symbol.
:param ase: ASE "images"
:return: array of CIF datablocks
"""
if not isinstance(ase, (list, tuple)):
ase = [ase]
datablocks = []
for i, atoms in enumerate(ase):
datablock = dict()
cell = atoms.cell
a = norm(cell[0])
b = norm(cell[1])
c = norm(cell[2])
alpha = arccos(dot(cell[1], cell[2])/(b*c))*180./pi
beta = arccos(dot(cell[0], cell[2])/(a*c))*180./pi
gamma = arccos(dot(cell[0], cell[1])/(a*b))*180./pi
datablock['_cell_length_a'] = str(a)
datablock['_cell_length_b'] = str(b)
datablock['_cell_length_c'] = str(c)
datablock['_cell_angle_alpha'] = str(alpha)
datablock['_cell_angle_beta'] = str(beta)
datablock['_cell_angle_gamma'] = str(gamma)
if atoms.pbc.all():
datablock['_symmetry_space_group_name_H-M'] = 'P 1'
datablock['_symmetry_int_tables_number'] = str(1)
datablock['_symmetry_equiv_pos_as_xyz'] = ['x, y, z']
datablock['_atom_site_label'] = []
datablock['_atom_site_fract_x'] = []
datablock['_atom_site_fract_y'] = []
datablock['_atom_site_fract_z'] = []
datablock['_atom_site_type_symbol'] = []
if full_occupancies:
datablock['_atom_site_occupancy'] = []
if add_fake_biso:
datablock['_atom_site_thermal_displace_type'] = []
datablock['_atom_site_B_iso_or_equiv'] = []
scaled = atoms.get_scaled_positions()
no = {}
for i, atom in enumerate(atoms):
symbol = atom.symbol
if symbol in no:
no[symbol] += 1
else:
no[symbol] = 1
datablock['_atom_site_label'].append(symbol + str(no[symbol]))
datablock['_atom_site_fract_x'].append(str(scaled[i][0]))
datablock['_atom_site_fract_y'].append(str(scaled[i][1]))
datablock['_atom_site_fract_z'].append(str(scaled[i][2]))
datablock['_atom_site_type_symbol'].append(symbol)
if full_occupancies:
datablock['_atom_site_occupancy'].append(str(1.0))
if add_fake_biso:
datablock['_atom_site_thermal_displace_type'].append('Biso')
datablock['_atom_site_B_iso_or_equiv'].append(str(1.0))
datablocks.append(datablock)
return datablocks
|
e6f7abc9d8142d5e607d5cf8496104fa7468c7c3
| 18,326 |
from promgen import models
def breadcrumb(instance=None, label=None):
"""
Create HTML Breadcrumb from instance
Starting with the instance, walk up the tree building a bootstrap3
    compatible breadcrumb
"""
def site(obj):
yield reverse("site-detail"), obj.domain
def shard(obj):
yield reverse("shard-list"), _("Shards")
yield obj.get_absolute_url(), obj.name
def service(obj):
yield reverse("service-list"), _("Services")
yield obj.get_absolute_url(), obj.name
def project(obj):
yield from service(obj.service)
yield obj.get_absolute_url(), obj.name
def alert(obj):
yield reverse("alert-list"), _("Alerts")
yield obj.get_absolute_url(), obj.pk
def rule(obj):
if obj.content_type.model == "site":
yield from site(obj.content_object)
if obj.content_type.model == "service":
yield from service(obj.content_object)
if obj.content_type.model == "project":
yield from project(obj.content_object)
# If we have a new rule, it won't have a name
if obj.pk:
yield obj.get_absolute_url(), obj.name
def sender(obj):
if obj.content_type.model == "service":
yield from service(obj.content_object)
if obj.content_type.model == "project":
yield from project(obj.content_object)
def generator():
yield reverse("home"), _("Home")
if isinstance(instance, models.Sender):
yield from sender(instance)
if isinstance(instance, models.Project):
yield from project(instance)
if isinstance(instance, models.Service):
yield from service(instance)
if isinstance(instance, models.Shard):
yield from shard(instance)
if isinstance(instance, models.Rule):
yield from rule(instance)
if isinstance(instance, models.Alert):
yield from alert(instance)
def to_tag():
yield '<ol class="breadcrumb">'
for href, text in generator():
yield format_html('<li><a href="{}">{}</a></li>', mark_safe(href), text)
if label:
yield format_html('<li class="active">{}</li>', _(label))
yield "</ol>"
return mark_safe("".join(to_tag()))
|
5fec6c8b6d1bfdadec9405b3a4b73b119d7357f9
| 18,327 |
import os
def parse_body_at(path_to_hdf5, num_frame, all=False):
"""
:param path_to_hdf5: path to annotations 'hdf5' file
:param num_frame: frame to extract annotations from
:param all: if True, all original landmarks are returned. Otherwise, only those used for evaluation are returned.
:return: confidence, landmarks, valid (only useful for validation and test sets)
"""
assert os.path.exists(path_to_hdf5) or path_to_hdf5.split(".")[-1].lower() != "hdf5", "HDF5 file could not be opened."
with h5py.File(path_to_hdf5, "r") as f:
key_frame = f"{num_frame:05d}"
if "body" not in f[key_frame]:
return 0, [], False
# -------- BODY --------
confidence = f[key_frame]["body"].attrs["confidence"] if "confidence" in f[key_frame]["body"].attrs.keys() else 0
valid = f[key_frame]["body"].attrs["valid"] if "valid" in f[key_frame]["body"].attrs.keys() else False
landmarks = None
if "landmarks" in f[key_frame]["body"].keys():
landmarks = f[key_frame]["body"]["landmarks"][()]
if not all and landmarks is not None:
landmarks = filter_body_landmarks(landmarks).astype(int)
return confidence, landmarks, valid
|
af0e6efef103f1c018d7f74de2e63660444166e3
| 18,328 |
def get_loader(data_args,
transform_args,
split,
task_sequence,
su_frac,
nih_frac,
cxr_frac,
tcga_frac,
batch_size,
is_training=False,
shuffle=False,
study_level=False,
frontal_lateral=False,
return_info_dict=False,
covar_list='',
fold_num=None):
"""Returns a dataset loader.
    If both su_frac and nih_frac are one, the loader
    will sample both Stanford and NIH data.
Args:
su_frac: Float that specifies what percentage of stanford to load.
nih_frac: Float that specifies what percentage of NIH to load.
cxr_frac: Dictionary that specifies what fraction of each CXR dataset is needed.
# TODO: remove all the frac arguments and instead pass a dictionary
split: String determining if this is the train, valid, test, or sample split.
shuffle: If true, the loader will shuffle the data.
study_level: If true, creates a loader that loads the image on the study level.
Only applicable for the SU dataset.
frontal_lateral: If true, loads frontal/lateral labels.
Only applicable for the SU dataset.
return_info_dict: If true, return a dict of info with each image.
covar_list: List of strings, specifying the covariates to be sent along with the images.
Return:
DataLoader: A dataloader
"""
if is_training:
study_level = data_args.train_on_studies
datasets = []
for cxr_ds in ['pocus', 'hocus', 'pulm']:
if cxr_ds in cxr_frac.keys() and cxr_frac[cxr_ds] != 0:
if cxr_ds == 'pocus':
data_dir = data_args.pocus_data_dir
img_dir = None
elif cxr_ds == 'hocus':
data_dir = data_args.hocus_data_dir
img_dir = None
else:
data_dir = data_args.pulm_data_dir
img_dir = data_args.pulm_img_dir
datasets.append(
CXRDataset(
data_dir,
transform_args, split=split,
covar_list=covar_list,
is_training=is_training,
dataset_name=cxr_ds,
tasks_to=task_sequence,
frac=cxr_frac[cxr_ds],
toy=data_args.toy,
img_dir=img_dir,
fold_num=fold_num,
)
)
if len(datasets) == 2:
assert study_level is False, "Currently, you can't create concatenated datasets when training on studies"
dataset = ConcatDataset(datasets)
else:
dataset = datasets[0]
# Pick collate function
if study_level and not data_args.eval_tcga:
collate_fn = PadCollate(dim=0)
loader = data.DataLoader(dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=8,
collate_fn=collate_fn)
else:
loader = data.DataLoader(dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=8)
return loader
|
9f29c63a0d20c71bc6d51f97ddc820709d978caa
| 18,329 |
from typing import Optional
def get_domain(arn: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDomainResult:
"""
The resource schema to create a CodeArtifact domain.
:param str arn: The ARN of the domain.
"""
__args__ = dict()
__args__['arn'] = arn
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:codeartifact:getDomain', __args__, opts=opts, typ=GetDomainResult).value
return AwaitableGetDomainResult(
arn=__ret__.arn,
name=__ret__.name,
owner=__ret__.owner,
permissions_policy_document=__ret__.permissions_policy_document,
tags=__ret__.tags)
|
bed4130d5ec77f859cfa7e200794990792b0d2e8
| 18,330 |
def collect_properties(service_instance, view_ref, obj_type, path_set=None, include_mors=False):
"""
Collect properties for managed objects from a view ref
Check the vSphere API documentation for example on retrieving
object properties:
- http://goo.gl/erbFDz
Original Source: https://github.com/dnaeon/py-vconnector/blob/master/src/vconnector/core.py
Modified for my purposes here.
:param pyVmomi.vim.view.* view_ref: Starting point of inventory navigation
:param pyVmomi.vim.* obj_type: Type of managed object
:param list path_set: List of properties to retrieve
:param bool include_mors: If True include the managed objects refs in the result
:return: A list of properties for the managed objects
:rtype list:
"""
collector = service_instance.content.propertyCollector
# Create object specification to define the starting point of
# inventory navigation
obj_spec = vmodl.query.PropertyCollector.ObjectSpec()
obj_spec.obj = view_ref
obj_spec.skip = True
# Create a traversal specification to identify the path for collection
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec()
traversal_spec.name = 'traverseEntities'
traversal_spec.path = 'view'
traversal_spec.skip = False
traversal_spec.type = view_ref.__class__
obj_spec.selectSet = [traversal_spec]
    # Identify the properties to be retrieved
property_spec = vmodl.query.PropertyCollector.PropertySpec()
property_spec.type = obj_type
if not path_set:
property_spec.all = True
property_spec.pathSet = path_set
# Add the object and property specification to the
# property filter specification
filter_spec = vmodl.query.PropertyCollector.FilterSpec()
filter_spec.objectSet = [obj_spec]
filter_spec.propSet = [property_spec]
# Retrieve properties
props = collector.RetrieveContents([filter_spec])
data = []
for obj in props:
properties = {}
for prop in obj.propSet:
properties[prop.name] = prop.val
if include_mors:
properties['obj'] = obj.obj
data.append(properties)
return data
|
98cc1f6baa38f6c82452a0c3c5efb13a86a17b9e
| 18,331 |
import signal
from functools import wraps
def _timeout(seconds):
"""Decorator for preventing a function from running for too long.
Inputs:
seconds (int): The number of seconds allowed.
Notes:
This decorator uses signal.SIGALRM, which is only available on Unix.
"""
assert isinstance(seconds, int), "@timeout(sec) requires an int"
def _handler(signum, frame):
"""Handle the alarm by raising a custom exception."""
raise TimeoutError("Timeout after {0} seconds".format(seconds))
def decorator(func):
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handler)
signal.alarm(seconds) # Set the alarm.
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0) # Turn the alarm off.
return result
return wraps(func)(wrapper)
return decorator
|
dec9909e662c3943d2bba56e116d6c212201aa42
| 18,332 |
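A usage sketch for the _timeout decorator (Unix only, as noted above); the slow function is made up for illustration.

import time

@_timeout(2)
def slow_task():
    """Hypothetical function that exceeds the 2-second budget."""
    time.sleep(5)
    return "done"

try:
    slow_task()
except TimeoutError as exc:
    print(exc)  # "Timeout after 2 seconds"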
def marginal_entropy(problem: dict, train_ixs: np.ndarray, obs_labels: np.ndarray, unlabeled_ixs: np.ndarray,
batch_size: int, **kwargs) -> np.ndarray:
"""
Score is -p(x)log[p(x)] i.e. marginal entropy of the point.
:param problem: dictionary that defines the problem, containing keys:
* points: an (n_samples, n_dim) matrix of points in the space
* num_classes: the number of different classes [0, num_classes)
* batch_size: number of points to query each iteration
* num_queries: the max number of queries we can make on the data
* model: the sk-learn model we are training
:param train_ixs: index into `points` of the training examples
:param obs_labels: labels for the training examples
:param unlabeled_ixs: indices into problem['points'] to score
:param kwargs: unused
:return: scores for each of selected_ixs
"""
points = problem['points']
model = problem['model']
test_X = points[unlabeled_ixs]
p_x = model.predict_proba(test_X)
p_x = p_x.clip(1e-9, 1 - 1e-9)
logp_x = np.log(p_x)
return -1 * (p_x * logp_x).sum(axis=1)
# return 1/ np.abs(model.decision_function(test_X))
|
78751d2cae853f9595579578c38bf2974cea4316
| 18,333 |
def prepareNNImages(bact_img, ftsz_img, model, bacteria=False):
"""Preprocess raw iSIM images before running them throught the neural network.
Returns a 3D numpy array that contains the data for the neural network and the
positions dict generated by getTilePositions for tiling.
"""
# Set iSIM specific values
pixelCalib = 56 # nm per pixel
sig = 121.5 / 81 # in pixel
resizeParam = pixelCalib / 81 # no unit
try:
nnImageSize = model.layers[0].input_shape[0][1]
except AttributeError:
nnImageSize = model
positions = None
# Preprocess the images
if nnImageSize is None or ftsz_img.shape[1] > nnImageSize:
# Adjust to 81nm/px
bact_img = transform.rescale(bact_img, resizeParam)
ftsz_img = transform.rescale(ftsz_img, resizeParam)
        # This leaves an image that is smaller than initially
# gaussian and background subtraction
bact_img = filters.gaussian(bact_img, sig, preserve_range=True)
ftsz_img = filters.gaussian(
ftsz_img, sig, preserve_range=True
) - filters.gaussian(ftsz_img, sig * 5, preserve_range=True)
# Tiling
if nnImageSize is not None:
positions = getTilePositionsV2(ftsz_img, nnImageSize)
contrastMax = 255
else:
contrastMax = 1
# Contrast
ftsz_img = exposure.rescale_intensity(
ftsz_img, (np.min(ftsz_img), np.max(ftsz_img)), out_range=(0, contrastMax)
)
bact_img = exposure.rescale_intensity(
bact_img, (np.mean(bact_img), np.max(bact_img)), out_range=(0, contrastMax)
)
else:
positions = {
"px": [(0, 0, ftsz_img.shape[1], ftsz_img.shape[1])],
"n": 1,
"overlap": 0,
"stitch": 0,
}
# Put into format for the network
if nnImageSize is not None:
ftsz_img = ftsz_img.reshape(1, ftsz_img.shape[0], ftsz_img.shape[0], 1)
bact_img = bact_img.reshape(1, bact_img.shape[0], bact_img.shape[0], 1)
inputDataFull = np.concatenate((bact_img, ftsz_img), axis=3)
# Cycle through these tiles and make one array for everything
i = 0
inputData = np.zeros(
(positions["n"] ** 2, nnImageSize, nnImageSize, 2), dtype=np.uint8()
)
for position in positions["px"]:
inputData[i, :, :, :] = inputDataFull[
:, position[0] : position[2], position[1] : position[3], :
]
if bacteria:
inputData[i, :, :, 1] = exposure.rescale_intensity(
inputData[i, :, :, 1],
(0, np.max(inputData[i, :, :, 1])),
out_range=(0, 255),
)
inputData[i, :, :, 0] = exposure.rescale_intensity(
inputData[i, :, :, 0],
(0, np.max(inputData[i, :, :, 0])),
out_range=(0, 255),
)
i = i + 1
inputData = inputData.astype("uint8")
else:
# This is now missing the tile-wise rescale_intensity for the mito channel.
# Image shape has to be in multiples of 4, not even quadratic
cropPixels = (
bact_img.shape[0] - bact_img.shape[0] % 4,
bact_img.shape[1] - bact_img.shape[1] % 4,
)
bact_img = bact_img[0 : cropPixels[0], 0 : cropPixels[1]]
ftsz_img = ftsz_img[0 : cropPixels[0], 0 : cropPixels[1]]
positions = getTilePositionsV2(bact_img, 128)
bact_img = bact_img.reshape(1, bact_img.shape[0], bact_img.shape[0], 1)
ftsz_img = ftsz_img.reshape(1, ftsz_img.shape[0], ftsz_img.shape[0], 1)
inputData = np.stack((bact_img, ftsz_img), 3)
return inputData, positions
|
b43f064ebbb63043db047cc406ad64df1edb0b44
| 18,334 |
def get_key(val):
"""
Get dict key by value
:param val:
:return:
"""
for key, value in HANDSHAKE.items():
if val == value:
return key
|
df5924127bec434cadbfae3a4d9e347c55678ae5
| 18,335 |
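A brief usage sketch for get_key; HANDSHAKE is defined elsewhere, so a stand-in mapping and a parameterised copy of the lookup are used here purely for illustration.

HANDSHAKE_EXAMPLE = {"SYN": 1, "SYN-ACK": 2, "ACK": 3}  # hypothetical mapping

def get_key_example(val, mapping=HANDSHAKE_EXAMPLE):
    """Same reverse lookup as above, with the dict passed in for the demo."""
    for key, value in mapping.items():
        if val == value:
            return key

print(get_key_example(2))  # "SYN-ACK"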
import re
def valid_text(val, rule):
"""Return True if regex fully matches non-empty string of value."""
if callable(rule):
match = rule(val)
else:
match = re.findall(rule, val)
return (False if not match or not val else
True if match is True else
match[0] == val)
|
aa6f6ac3a3210d34b44eba1f2e8e8cff851ff038
| 18,336 |
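A quick usage sketch showing both the regex form and the callable form of rule in valid_text; the patterns are made up.

print(valid_text("abc123", r"[a-z]+\d+"))        # True: the regex fully matches
print(valid_text("abc123!", r"[a-z]+\d+"))       # False: the trailing '!' is not matched
print(valid_text("abc", lambda v: v.isalpha()))  # True: callable rule returned True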
def settings():
"""Render the settings page."""
c = mongo.db[app.config['USERS_COLLECTION']]
user = c.find_one({'username': current_user.get_id()})
if not user:
return render_template()
user['id'] = str(user['_id'])
user.pop('_id', None)
return render_template('settings.html', user=user)
|
3a6e3cb38680aea581f3fda1edb11fb0237df355
| 18,337 |
def exportAllScansS3(folder_id):
""" Exports all Tenable scans found in a folder to S3. """
scan_list = []
scans = client.scan_helper.scans(folder_id=folder_id)
for scan in scans:
if scan.status() != 'completed':
continue
scan.download("./%s.html" % scan.details().info.name, format='html')
scan_list.append(scan.id)
return scan_list
|
2f460d5cc0d96bfcaab8fca8d1f103120ae78ca4
| 18,338 |
import six
def check_ontology_graph(ontology_key, survol_agent=None):
"""This checks that a full ontology contains a minimal subset of classes and attributes.
This is for testing purpose only."""
url_script = {
"survol": "ontologies/Survol_RDFS.py",
"wmi": "ontologies/WMI_RDFS.py",
"wbem": "ontologies/WBEM_RDFS.py"}[ontology_key]
if survol_agent:
# TODO: The url syntax differences between SourceLocal and SourceRemote are not convenient.
# TODO: Remove this leading "/" slash.
my_source = SourceRemote(survol_agent + "/survol/" + url_script)
else:
my_source = SourceLocal(url_script)
ontology_survol = my_source.get_content_moded(None)
assert isinstance(ontology_survol, six.binary_type)
ontology_graph = rdflib.Graph()
result = ontology_graph.parse(data=ontology_survol, format="application/rdf+xml")
return lib_kbase.check_minimal_rdsf_ontology(ontology_graph)
|
a2508826f7e9438b58a5696aa38c1b947aa7c381
| 18,339 |
import functools
import itertools
import time
import random
import traceback
import sys
def Retry(retry_value=Exception,
max_retries=None,
initial_delay_sec=1.0,
delay_growth_factor=1.5,
delay_growth_fuzz=0.1,
max_delay_sec=60):
"""Returns a retry decorator."""
if max_retries is None:
max_retries = 2**30 # Effectively forever.
if delay_growth_factor < 1.0:
raise ValueError("Invalid delay_growth_factor: %f" % delay_growth_factor)
def _Retry(func):
@functools.wraps(func)
def Wrapper(*args, **kwargs):
"""Decorator wrapper."""
delay = initial_delay_sec
for retries in itertools.count(0):
try:
return func(*args, **kwargs)
except retry_value as e:
if retries >= max_retries:
raise
time.sleep(delay)
fuzz_factor = 1.0 + random.random() * delay_growth_fuzz
delay += delay * (delay_growth_factor - 1) * fuzz_factor
delay = min(delay, max_delay_sec)
e_desc_str = "".join(traceback.format_exception_only(e.__class__, e))
stack_traceback_str = "".join(traceback.format_stack()[:-2])
e_traceback = sys.exc_info()[2]
e_traceback_str = "".join(traceback.format_tb(e_traceback))
tf.logging.info(
"Retry: caught exception: %s while running %s. "
"Call failed at (most recent call last):\n%s"
"Traceback for above exception (most recent call last):\n%s"
"Waiting for %.2f seconds before retrying.", func.__name__,
e_desc_str, stack_traceback_str, e_traceback_str, delay)
return Wrapper
return _Retry
|
67e3abf2464ba65c070d8d19fa0a817a26e168cc
| 18,340 |
def download_handler(resource, _, filename=None, inline=False, activity_id=None):
"""Get the download URL from LFS server and redirect the user there
"""
if resource.get('url_type') != 'upload' or not resource.get('lfs_prefix'):
return None
context = get_context()
data_dict = {'resource': resource,
'filename': filename,
'inline': inline,
'activity_id': activity_id}
resource_download_spec = tk.get_action('get_resource_download_spec')(context, data_dict)
href = resource_download_spec.get('href')
if href:
return tk.redirect_to(href)
else:
return tk.abort(404, tk._('No download is available'))
|
16fcf2ae97ff5d8d2d0d206c1e3035092a034006
| 18,341 |
def body_open():
"""open the main logic"""
return " @coroutine\n def __query(__connection):"
|
d8792f2b3237f024f20a12c6b7d371af1dbdb21e
| 18,342 |
import sqlite3
def db_add_entry(user_auth,\
name:str, user_id:str, user_pw:str, url:str=''):
"""
    Add an entry into the credentials database.
    Returns True on success, or False if the insertion fails.
"""
# SQL Query
sql = f'INSERT INTO {DB_TABLE}'
sql += '(name, user_id, user_pw, url, date_created, date_modified) '
sql += 'VALUES(?, ?, ?, ?, ?, ?)'
# Params
user_id_enc = user_auth.encrypt(user_id)
user_pw_enc = user_auth.encrypt(user_pw)
current_ts = get_current_ts()
sql_params = [name, user_id_enc, user_pw_enc, url, current_ts, current_ts]
entry_id = -1
# Run SQL
try:
with user_auth.conn as conn:
cur = conn.cursor()
cur.execute(sql, sql_params)
entry_id = cur.lastrowid
cur.close()
except sqlite3.DatabaseError:
return False
# Sign the entry
user_auth.sign_entry(entry_id, update_db=True)
return True
|
217c54215c9ee49ed8a32b6093e2b274ce0fe7fe
| 18,343 |
def First():
"""(read-only) Sets the first sensor active. Returns 0 if none."""
return lib.Sensors_Get_First()
|
255eb920dae36a01f5e430c44a6922db7eaac0c9
| 18,344 |
def rch_from_model_ds(model_ds, gwf):
"""get recharge package from model dataset.
Parameters
----------
model_ds : xarray.Dataset
dataset with model data.
gwf : flopy ModflowGwf
groundwaterflow object.
Returns
-------
rch : flopy ModflowGwfrch
rch package
"""
# create recharge package
rch = recharge.model_datasets_to_rch(gwf, model_ds)
return rch
|
6af1fef950a951026d082762fd1dce99af7a3ab1
| 18,345 |
def _drawdots_on_origin_image(mats, usage, img, notation_type, color=['yellow', 'green', 'blue', 'red']):
"""
    For visualization purposes, draw a different color on the original image.
    :param mats:
    :param usage: Detection or Classification
:param img: original image
:param color: color list for each category
:return: dotted image
"""
if usage == 'Classification':
for i, mat in enumerate(mats):
mat_content = mat['detection']
_draw_points(mat_content, img, color[i], notation_type=notation_type)
elif usage == 'Detection':
mat_content = mats['detection']
_draw_points(mat_content, img, color[0], notation_type=notation_type)
return img
|
12ae7544c1ddc415c237835cb14b112e186d0d15
| 18,346 |
def scale(*args, x = 1, y = 1):
"""
Returns a transformation which scales a path around the origin by the specified amount.
`scale(s)`: Scale uniformly by `s`.
`scale(sx, sy)`: Scale by `sx` along the x axis and by `sy` along the y axis.
`scale(x = sx)`: Scale along the x axis only.
`scale(y = sy)`: Scale along the y axis only.
"""
if args:
if len(args) == 1:
args *= 2
x, y = args
return transform(x, 0, 0, 0, y, 0)
|
4853a2dd4dd8145cfbd502a38531a769674c203c
| 18,347 |
import os
import pickle
def load_tweet_users_posted_rumours():
"""
load user history (whether a user posted any rumour in the past)
:return: dict {timestamp at which the user posted a rumour: user_id}
"""
with open(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'tweet_users_posted_rumours'), 'rb') as outfile:
rumour_users = pickle.load(outfile)
outfile.close()
return rumour_users
|
626fc152aae0b38aa4531dafb4d917100bad37c8
| 18,348 |
import os
def create_power_anomaly_pipeline(hparams):
"""
Generate anomalies of types 1 to 4 for a given power time series
"""
pipeline = Pipeline(path=os.path.join('run'))
seed = hparams.seed
# Type 1: Negative power spike potentially followed by zero values and finally a positive power spike
anomaly_type1 = PowerAnomalyGeneration(
'y_hat', anomaly='type1', count=hparams.type1, label=1, seed=seed + 1,
length_params={
'distribution': 'uniform',
'min': hparams.type1_len_min,
'max': hparams.type1_len_max
},
anomaly_params={
'k': hparams.k
}
)(x=pipeline['y'], labels=None)
# Type 2: Drop to potentially zero followed by a positive power spike
anomaly_type2 = PowerAnomalyGeneration(
'y_hat', anomaly='type2', count=hparams.type2, label=2, seed=seed + 2,
length_params={
'distribution': 'uniform',
'min': hparams.type2_len_min,
'max': hparams.type2_len_max,
},
anomaly_params={
'softstart': hparams.type2_softstart
}
)(x=anomaly_type1['y_hat'], labels=anomaly_type1['labels'])
# Type 3: Sudden negative power spike
if hparams.type3_extreme:
anomaly_type3 = PowerAnomalyGeneration(
'y_hat', anomaly='type3', count=hparams.type3, label=32, seed=seed + 4,
anomaly_params={
'is_extreme': True,
'k': hparams.k
}
)(x=anomaly_type2['y_hat'], labels=anomaly_type2['labels'])
else:
anomaly_type3 = PowerAnomalyGeneration(
'y_hat', anomaly='type3', count=hparams.type3, label=31, seed=seed + 3,
anomaly_params={
'is_extreme': False,
'range_r': (hparams.type3_r_min, hparams.type3_r_max),
}
)(x=anomaly_type2['y_hat'], labels=anomaly_type2['labels'])
# Type 4: Sudden positive power spike
anomaly_type4 = PowerAnomalyGeneration(
'y_hat', anomaly='type4', count=hparams.type4, label=4, seed=seed + 5,
anomaly_params={
'range_r': (hparams.type4_r_min, hparams.type4_r_max)
}
)(x=anomaly_type3['y_hat'], labels=anomaly_type3['labels'])
FunctionModule(lambda x: x, name='y')(x=pipeline['y'])
FunctionModule(lambda x: x, name='anomalies')(x=anomaly_type4['labels'])
FunctionModule(lambda x: x, name='y_hat')(x=anomaly_type4['y_hat'])
return pipeline
|
c219434f68237e1d3b69dff924d684d377b9e9c5
| 18,349 |
async def async_setup_entry(hass, config_entry):
"""Set up Tile as config entry."""
websession = aiohttp_client.async_get_clientsession(hass)
client = await async_login(
config_entry.data[CONF_USERNAME],
config_entry.data[CONF_PASSWORD],
session=websession,
)
async def async_update_data():
"""Get new data from the API."""
try:
return await client.tiles.all()
except SessionExpiredError:
LOGGER.info("Tile session expired; creating a new one")
await client.async_init()
except TileError as err:
raise UpdateFailed(f"Error while retrieving data: {err}") from err
coordinator = DataUpdateCoordinator(
hass,
LOGGER,
name=config_entry.title,
update_interval=DEFAULT_UPDATE_INTERVAL,
update_method=async_update_data,
)
await coordinator.async_refresh()
hass.data[DOMAIN][DATA_COORDINATOR][config_entry.entry_id] = coordinator
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, component)
)
return True
|
0aa031a56e32335cbe0aab83ff686eb970803b8c
| 18,350 |
def get_selection_uri_template():
"""
Utility function, to build Selection endpoint's Falcon uri_template string
>>> get_selection_uri_template()
'/v1/source/{source_id}/selection.{type}'
"""
str_source_uri = get_uri_template(source.str_route)
path_selection = selection.str_route
param_id = source_parameters.source_id
param_type = selection.str_param_type
str_selection_uri = ''.join(
['/{', param_id, '}/', path_selection, '{', param_type, '}']
)
return str_source_uri+str_selection_uri
|
ec51a7ecc9476dfd060e717c11f4db9255a756dc
| 18,351 |
def conv_single_step(a_slice_prev, W, b):
"""
Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation
of the previous layer.
Arguments:
a_slice_prev -- slice of input data of shape (f, f, n_C_prev)
W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)
b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)
Returns:
Z -- a scalar value, result of convolving the sliding window (W, b) on a slice x of the input data
"""
### START CODE HERE ### (≈ 2 lines of code)
# Element-wise product between a_slice and W. Do not add the bias yet.
s = a_slice_prev * W
# Sum over all entries of the volume s.
Z = s.sum()
# Add bias b to Z. Cast b to a float() so that Z results in a scalar value.
    Z = Z + float(b)
### END CODE HERE ###
return Z
|
08528925783a6ad2e0d29e602aafe44082a8c50c
| 18,352 |
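A quick numpy sketch exercising conv_single_step; shapes follow the docstring and the random values are arbitrary.

import numpy as np

np.random.seed(1)
a_slice_prev = np.random.randn(3, 3, 4)  # (f, f, n_C_prev)
W = np.random.randn(3, 3, 4)
b = np.random.randn(1, 1, 1)
Z = conv_single_step(a_slice_prev, W, b)
print(Z)  # a single scalar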
def _is_pyqt_obj(obj):
"""Checks if ``obj`` wraps an underlying C/C++ object."""
if isinstance(obj, QtCore.QObject):
try:
obj.parent()
return True
except RuntimeError:
return False
else:
return False
|
7be1750807eaee9ab54ee144019909d3c6890c65
| 18,353 |
from typing import Optional
from typing import Tuple
def get_optimizer(
optim_type: str,
optimizer_grouped_parameters,
lr: float,
weight_decay: float,
eps: Optional[float] = 1e-6,
betas: Optional[Tuple[float, float]] = (0.9, 0.999),
momentum: Optional[float] = 0.9,
):
"""
Choose a Pytorch optimizer based on its name.
Parameters
----------
optim_type
Name of optimizer.
optimizer_grouped_parameters
The model parameters to be optimized.
lr
Learning rate.
weight_decay
Optimizer weight decay.
eps
Optimizer eps.
betas
Optimizer betas.
momentum
Momentum used in the SGD optimizer.
Returns
-------
A Pytorch optimizer.
"""
if optim_type == "adamw":
optimizer = optim.AdamW(
optimizer_grouped_parameters,
lr=lr,
weight_decay=weight_decay,
eps=eps,
betas=betas,
)
elif optim_type == "adam":
optimizer = optim.Adam(
optimizer_grouped_parameters,
lr=lr,
weight_decay=weight_decay,
)
elif optim_type == "sgd":
optimizer = optim.SGD(
optimizer_grouped_parameters,
lr=lr,
weight_decay=weight_decay,
momentum=momentum,
)
else:
raise ValueError(f"unknown optimizer: {optim_type}")
return optimizer
|
b80ca6d38ada3aba310656c7609e73a34d8555b7
| 18,354 |
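A brief usage sketch for get_optimizer, assuming PyTorch (the snippet refers to torch.optim as optim, which is not imported above); the model is a throwaway stand-in.

import torch
from torch import nn, optim

model = nn.Linear(16, 4)
optimizer = get_optimizer(
    optim_type="adamw",
    optimizer_grouped_parameters=model.parameters(),
    lr=1e-3,
    weight_decay=0.01,
)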
def forward(observations, transitions, sequence_len, batch=False):
"""Implementation of the forward algorithm in Keras.
Returns the log probability of the given observations and transitions
by recursively summing over the probabilities of all paths through
the state space. All probabilities are in logarithmic space.
See e.g. https://en.wikipedia.org/wiki/Forward_algorithm .
Args:
observations (tensor): A tensor of the observation log
probabilities, shape (sequence_len, num_states) if
batch is False, (batch_size, sequence_len, num_states)
otherwise.
transitions (tensor): A (num_states, num_states) tensor of
the transition weights (log probabilities).
sequence_len (int): The number of steps in the sequence.
This must be given because unrolling scan() requires a
definite (not tensor) value.
batch (bool): Whether to run in batchwise mode. If True, the
first dimension of observations corresponds to the batch.
Returns:
Total log probability if batch is False or vector of log
probabiities otherwise.
"""
step = make_forward_step(transitions, batch)
if not batch:
first, rest = observations[0, :], observations[1:, :]
else:
first, rest = observations[:, 0, :], observations[:, 1:, :]
sequence_len -= 1 # exclude first
outputs, _ = scan(step, rest, first, n_steps=sequence_len, batch=batch)
if not batch:
last, axis = outputs[sequence_len-1], 0
else:
last, axis = outputs[:, sequence_len-1], 1
return logsumexp(last, axis=axis)
|
16e104ee3e4a55903b2874cbef74e9e63584174c
| 18,355 |
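The helpers used above (make_forward_step, scan, logsumexp) are defined elsewhere; as a point of comparison, a plain-numpy sketch of the same forward recursion for a single (non-batch) sequence might look like this, under the assumption that observations and transitions hold log probabilities.

import numpy as np
from scipy.special import logsumexp

def forward_numpy(observations, transitions):
    """Reference forward pass in log space: observations is (T, S), transitions is (S, S)."""
    alpha = observations[0]  # log p(obs_0, state)
    for t in range(1, observations.shape[0]):
        # alpha_new[j] = logsum_i(alpha[i] + transitions[i, j]) + observations[t, j]
        alpha = logsumexp(alpha[:, None] + transitions, axis=0) + observations[t]
    return logsumexp(alpha)  # total log probability of the sequence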
def fib(n):
"""Return the n'th Fibonacci number.
"""
if n < 0:
raise ValueError("Fibonacci numbers are only defined for n >= 0.")
return _fib(n)
|
e25907deae2884e3ec69ba8ae29fb362aa50dbe3
| 18,356 |
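_fib is not shown above; a minimal memoized helper compatible with the wrapper, purely as an assumption about its shape:

from functools import lru_cache

@lru_cache(maxsize=None)
def _fib(n):
    """Hypothetical helper; an iterative definition would work equally well."""
    if n < 2:
        return n
    return _fib(n - 1) + _fib(n - 2)

print(fib(10))  # 55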
def check_column(board):
"""
list -> bool
This function checks if every column has different numbers and returns
    True if yes, and False if not.
>>> check_column(["**** ****", "***1 ****", "** 3****", \
"* 4 1****", " 9 5 ", " 6 83 *", "3 1 **", " 8 2***", " 2 ****"])
False
>>> check_column(["**** ****", "***1 ****", "** 3****", \
"* 4 1****", " 9 5 ", " 6 83 *", "3 5 **", " 8 2***", " 2 ****"])
True
"""
length = len(board)
for i in range(length):
one_line = []
for line in board:
if line[i] == '*' or line[i] == ' ':
continue
if line[i] in one_line:
return False
else:
one_line.append(line[i])
return True
|
b903d1b589cd2981cc374ff47f985151d341e7ec
| 18,357 |
def lines_len_in_circle(r, font_size=12, letter_width=7.2):
"""Return the amount of chars that fits each line in a circle according to
its radius *r*
Doctest:
.. doctest::
>>> lines_len_in_circle(20)
[2, 5, 2]
"""
lines = 2 * r // font_size
positions = [
x + (font_size // 2) * (-1 if x <= 0 else 1)
for x in text_y(lines)
]
return [
int(2 * r * cos(asin(y / r)) / letter_width)
for y in positions
]
|
43a7043d1c1632a9683476ab5cfa9d19f8105230
| 18,358 |
import subprocess
import logging
import sys
import string
def exec_psql(conn_str, query, **args): # type: (str, str, dict) -> str
"""
Executes SQL queries by forking and exec-ing '/usr/bin/psql'.
:param conn_str: A "connection string" that defines the postgresql resource in the format
{schema}://{user}:{password}@{host or IP}:{port}/{database}
:param query: The query to be run. It can actually be a script containing multiple queries.
:returns: The comma-separated columns of each line-delimited row of the results of the query.
"""
cmd = ["/usr/bin/psql", "--tuples-only", "-d", conn_str, "-c", query] + list(args.values())
proc = subprocess.Popen(
cmd,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True,
)
proc.wait()
output = proc.communicate()
if proc.returncode != 0:
logging.debug("psql exec failed; stderr: %s\n\tstdout: %s", output[1], output[0])
raise OSError("failed to execute database query")
if sys.version_info.major >= 3:
return output[0].strip()
else:
return string.strip(output[0])
|
ee3200d84dacf245720fdf998769903f8463dea3
| 18,359 |
def capacitorCorrection(m_cap):
"""Apply a correction to the measured capacitance value
to get a value closer to the real value.
    One reason the measured value may differ is that the measurement varies with frequency.
    The measurements are performed at 30Hz but capacitance values
    are normally quoted for 1kHz.
    The coefficients are based on multiple linear regression in R
using rms-based measurements of capacitors vs readings from multimeter
plus a fudge for small values!
"""
### These are based on 30Hz sine wave with 2x2k2 + 2x10nF
### + internal_res = 200 and no esr correction
###return -7.599263e-08 + 9.232542e-01 * m_cap + 1.690527e+04 * m_cap * m_cap
### 31Hz sine 2x2k2 + 2x10nF with internal_res = 140 no esr correction
poly2_cor = -6.168148e-08 + 8.508691e-01 * m_cap + 2.556320e+04 * m_cap * m_cap
return poly2_cor if poly2_cor > 30e-9 else m_cap * 0.2
|
1942c177d534bc5533bb636e10f0107c1230c81d
| 18,360 |
def code128_quebrar_partes(partes):
"""
    Determine the parts into which the Code128 barcode should be broken.
    Code128 barcodes require the data to have an even length so that the
    data can be encoded in this symbology.
    Although the CF-e-SAT key has 44 digits, not every medium can
    accommodate printing all 44 digits in the Code128 symbology, which is
    why the SAT Guidance Manual allows the key barcode to be broken into
    two or more parts, resulting, for example, in one Code128 with the
    first 22 digits of the CF-e key and another Code128 just below it
    with the remaining 22 digits.
    This split can be made into 4 barcodes, but they cannot consist of
    4 parts of 11 digits each, due to the Code128 limitation of encoding
    data in even-length sequences. For a 4-part split to work, each part
    would need lengths of ``10, 10, 10, 14``, summing to 44 digits, or
    lengths of ``12, 12, 12, 8``, summing to 44 digits.
    :param str partes: A string specifying a list of integers, all even,
        greater than zero and separated by commas, whose sum must equal
        44, which is the length of the CF-e-SAT key.
    :returns: A tuple of integers extracted from the string given in the
        argument.
    :rtype: tuple
"""
try:
lista_partes = [int(p) for p in partes.split(',')]
except ValueError:
raise ValueError(
(
'Configuracoes do extrato do CF-e-SAT, Code128 em '
'partes deve especificar as partes em valores inteiros, '
'todos numeros pares e separados por virgulas; '
'obtido: {!r}'
).format(partes)
)
# checa se a lista contém apenas números pares
for i, n in enumerate(lista_partes, 1):
if n <= 0 or n % 2 != 0:
raise ValueError(
(
'Configuracoes do extrato do CF-e-SAT, Code128, '
'elemento {!r} deve ser um número par; obtido {!r}'
).format(i, n)
)
calculado = sum(lista_partes)
if calculado != _TAMANHO_CHAVE_CFESAT:
raise ValueError(
(
'Configuracoes do extrato do CF-e-SAT, Code128 em '
'partes deve especificar as partes em valores inteiros, '
'todos numeros pares e separados por virgulas; a soma '
'das partes deve ser igual ao tamanho da chave do '
'CF-e-SAT; obtido: {!r} (esperado: {!r})'
).format(
calculado,
_TAMANHO_CHAVE_CFESAT
)
)
return tuple(lista_partes)
|
98a558742f71e7323da7ff6c2a140962d58e59da
| 18,361 |
def wrist_mounted_calibration(calibration_data_folder, debug=False):
"""
Parse our config file and run handical.
"""
extrinsics_out_file = os.path.join(calibration_data_folder,
'extrinsics.txt')
config_filename = os.path.join(calibration_data_folder, 'robot_data.yaml')
config = read_config(config_filename)
ncols = config['header']['target']['width']
nrows = config['header']['target']['height']
cell_size = config['header']['target']['square_edge_length']
board = ChessBoard((nrows, ncols), cell_size)
# read the X_BE poses
# X_BE_poses = []
X_EB_list = []
for idx, data in enumerate(config['data_list']):
ee_to_base = dict_to_pose(data['hand_frame'])
base_to_ee = ee_to_base.inverse()
# X_BE_poses.append(ee_to_base)
X_EB_list.append(base_to_ee)
init_X_BM = dict_to_pose(
config['header']['target']['transform_to_robot_base'])
cam_intrinsics = []
cam_keys = {}
image_type = config['header']['image_type']
print "image_type: ", image_type
# we only have one camera
if image_type == "ir":
intrinsics_prefix = "depth"
else:
intrinsics_prefix = image_type
intrinsics_filename = os.path.join(calibration_data_folder,
intrinsics_prefix + "_camera_info.yaml")
intrinsics = read_intrinsic(intrinsics_filename)
key = 0
camera_name = config['header']['camera']
cam_intrinsics.append(intrinsics)
cam_keys[camera_name] = key
handical = HandicalWristMount(board.size, board.cell_size, init_X_BM,
X_EB_list, cam_intrinsics)
for pose_id, X_BE in enumerate(X_EB_list):
print "X_BE: ", X_BE
img_filename = os.path.join(
calibration_data_folder,
config['data_list'][pose_id]['images'][image_type]['filename'])
img = load_image_as_grayscale(img_filename)
handical.add_image(img, pose_id, cam_keys[camera_name], debug)
print "graph size: ", handical.backend.graph.size()
results, error = handical.calibrate()
print("Calibration results:")
print results, error
save_results(extrinsics_out_file, results, cam_keys, error)
calibration_results = calibration_results_to_dict(results)
return calibration_results
|
c98f4f2161bcb0f03503173fceb3e03e3825d069
| 18,362 |
def debug(func):
"""Only for debugging purposes: prints a tree
It will print a nice execution tree with arguments and results
of all decorated functions.
"""
if not SYMPY_DEBUG:
#normal mode - do nothing
return func
#debug mode
def decorated(*args, **kwargs):
#r = func(*args, **kwargs)
r = maketree(func, *args, **kwargs)
#print "%s = %s(%s, %s)" % (r, func.__name__, args, kwargs)
return r
return decorated
|
4541bad13af0d3ad6ce4ea368798fa5ea267fdf3
| 18,363 |
def get_s3_keys(bucket):
"""Get a list of keys in an S3 bucket."""
keys = []
resp = s3.list_objects(Bucket=bucket)
for obj in resp['Contents']:
keys.append(obj['Key'])
return keys
|
2efb2e4e9a7ac943c3e35e69a0987933957800e1
| 18,364 |
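The module-level s3 client used by get_s3_keys is not shown; a sketch assuming the usual boto3 client setup and a hypothetical bucket name.

import boto3

s3 = boto3.client("s3")  # assumed module-level client used by get_s3_keys

for key in get_s3_keys("my-example-bucket"):  # hypothetical bucket
    print(key)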
from scipy.ndimage.filters import maximum_filter, minimum_filter
def plot_maxmin_points(lon, lat, data, extrema, nsize, symbol, color='k',
plotValue=True, transform=None):
"""
This function will find and plot relative maximum and minimum for a 2D grid. The function
can be used to plot an H for maximum values (e.g., High pressure) and an L for minimum
    values (e.g., low pressure). It is best to use filtered data to obtain a synoptic-scale
max/min value. The symbol text can be set to a string value and optionally the color of the
symbol and any plotted value can be set with the parameter color
lon = plotting longitude values (2D)
lat = plotting latitude values (2D)
data = 2D data that you wish to plot the max/min symbol placement
extrema = Either a value of max for Maximum Values or min for Minimum Values
nsize = Size of the grid box to filter the max and min values to plot a reasonable number
symbol = String to be placed at location of max/min value
    color = String matplotlib colorname to plot the symbol (and numeric value, if plotted)
plot_value = Boolean (True/False) of whether to plot the numeric value of max/min point
The max/min symbol will be plotted on the current axes within the bounding frame
(e.g., clip_on=True)
^^^ Notes from MetPy. Function adapted from MetPy.
"""
if (extrema == 'max'):
data_ext = maximum_filter(data, nsize, mode='nearest')
elif (extrema == 'min'):
data_ext = minimum_filter(data, nsize, mode='nearest')
else:
        raise ValueError("Value for extrema must be either 'max' or 'min'")
    # Grid points where the filtered field equals the raw field are local extrema.
    mxy, mxx = np.where(data_ext == data)
    # NOTE: `ax` is expected to be a module-level matplotlib Axes set up by the caller.
    for i in range(len(mxy)):
ax.text(lon[mxy[i], mxx[i]], lat[mxy[i], mxx[i]], symbol, color=color, size=13,
clip_on=True, horizontalalignment='center', verticalalignment='center',
fontweight='extra bold',
transform=transform)
ax.text(lon[mxy[i], mxx[i]], lat[mxy[i], mxx[i]],
                '\n \n' + str(int(data[mxy[i], mxx[i]])),
color=color, size=8, clip_on=True, fontweight='bold',
horizontalalignment='center', verticalalignment='center',
transform=transform, zorder=10)
return ax
|
a1d17972540346e96337e54da72960dc9e1f8afe
| 18,365 |
def _get_results(model):
"""
Helper function to get the results from the solved model instances
"""
_invest = {}
results = solph.processing.convert_keys_to_strings(model.results())
for i in ["wind", "gas", "storage"]:
_invest[i] = results[(i, "electricity")]["scalars"]["invest"]
return _invest
|
d054ef85df5603d8b450c0e35cd65ab5fb1cc278
| 18,366 |
def _get_vlan_list():
""" Aggregate vlan data.
    Returns:
        Tree of switches and vlan information by port
"""
log = logger.getlogger()
vlan_list = Tree()
for ntmpl_ind in CFG.yield_ntmpl_ind():
ntmpl_ifcs = CFG.get_ntmpl_ifcs_all(ntmpl_ind)
for ifc in ntmpl_ifcs:
vlan_num, vlan_ifc_name = _get_vlan_info(ifc)
if vlan_num:
vlan_slaves = _get_vlan_slaves(vlan_ifc_name)
for phyintf_idx in CFG.yield_ntmpl_phyintf_data_ind(ntmpl_ind):
phy_ifc_lbl = CFG.get_ntmpl_phyintf_data_ifc(ntmpl_ind, phyintf_idx)
if phy_ifc_lbl in vlan_slaves:
vlan_ports = CFG.get_ntmpl_phyintf_data_ports(
ntmpl_ind, phyintf_idx)
switch = CFG.get_ntmpl_phyintf_data_switch(
ntmpl_ind, phyintf_idx)
if vlan_num in vlan_list[switch]:
vlan_list[switch][vlan_num] += vlan_ports
else:
vlan_list[switch][vlan_num] = vlan_ports
pretty_str = PP.pformat(vlan_list)
log.debug('vlan list')
log.debug('\n' + pretty_str)
# Aggregate by switch and port number
port_vlans = Tree()
for switch in vlan_list:
for vlan in vlan_list[switch]:
for port in vlan_list[switch][vlan]:
if str(port) in port_vlans[switch]:
port_vlans[switch][str(port)].append(vlan)
else:
port_vlans[switch][str(port)] = [vlan]
pretty_str = PP.pformat(port_vlans)
log.debug('port_vlans')
log.debug('\n' + pretty_str)
return port_vlans
|
baae2d837f24d6b887d8facc1c0148d7e64f4239
| 18,367 |
def subtract_overscan(ccd, overscan=None, overscan_axis=1, fits_section=None,
median=False, model=None):
"""
Subtract the overscan region from an image.
Parameters
----------
ccd : `~astropy.nddata.CCDData`
Data to have overscan frame corrected.
overscan : `~astropy.nddata.CCDData` or None, optional
Slice from ``ccd`` that contains the overscan. Must provide either
this argument or ``fits_section``, but not both.
Default is ``None``.
overscan_axis : 0, 1 or None, optional
        Axis along which the overscan should be combined with mean or median. Axis
numbering follows the *python* convention for ordering, so 0 is the
first axis and 1 is the second axis.
If overscan_axis is explicitly set to None, the axis is set to
the shortest dimension of the overscan section (or 1 in case
of a square overscan).
Default is ``1``.
fits_section : str or None, optional
Region of ``ccd`` from which the overscan is extracted, using the FITS
conventions for index order and index start. See Notes and Examples
below. Must provide either this argument or ``overscan``, but not both.
Default is ``None``.
median : bool, optional
If true, takes the median of each line. Otherwise, uses the mean.
Default is ``False``.
model : `~astropy.modeling.Model` or None, optional
Model to fit to the data. If None, returns the values calculated
by the median or the mean.
Default is ``None``.
{log}
Raises
------
TypeError
A TypeError is raised if either ``ccd`` or ``overscan`` are not the
correct objects.
Returns
-------
ccd : `~astropy.nddata.CCDData`
CCDData object with overscan subtracted.
Notes
-----
    The format of the ``fits_section`` string follows the rules for slices that
are consistent with the FITS standard (v3) and IRAF usage of keywords like
TRIMSEC and BIASSEC. Its indexes are one-based, instead of the
python-standard zero-based, and the first index is the one that increases
most rapidly as you move through the array in memory order, opposite the
python ordering.
The 'fits_section' argument is provided as a convenience for those who are
processing files that contain TRIMSEC and BIASSEC. The preferred, more
pythonic, way of specifying the overscan is to do it by indexing the data
array directly with the ``overscan`` argument.
Examples
--------
Creating a 100x100 array containing ones just for demonstration purposes::
>>> import numpy as np
>>> from astropy import units as u
>>> arr1 = CCDData(np.ones([100, 100]), unit=u.adu)
The statement below uses all rows of columns 90 through 99 as the
overscan::
>>> no_scan = subtract_overscan(arr1, overscan=arr1[:, 90:100])
>>> assert (no_scan.data == 0).all()
This statement does the same as the above, but with a FITS-style section::
>>> no_scan = subtract_overscan(arr1, fits_section='[91:100, :]')
>>> assert (no_scan.data == 0).all()
Spaces are stripped out of the ``fits_section`` string.
"""
if not (isinstance(ccd, CCDData) or isinstance(ccd, np.ndarray)):
raise TypeError('ccddata is not a CCDData or ndarray object.')
if ((overscan is not None and fits_section is not None) or
(overscan is None and fits_section is None)):
raise TypeError('specify either overscan or fits_section, but not '
'both.')
if (overscan is not None) and (not isinstance(overscan, CCDData)):
raise TypeError('overscan is not a CCDData object.')
if (fits_section is not None and
not isinstance(fits_section, str)):
        raise TypeError('fits_section is not a string.')
if fits_section is not None:
overscan = ccd[slice_from_string(fits_section, fits_convention=True)]
if overscan_axis is None:
overscan_axis = 0 if overscan.shape[1] > overscan.shape[0] else 1
if median:
oscan = np.median(overscan.data, axis=overscan_axis)
else:
oscan = np.mean(overscan.data, axis=overscan_axis)
if model is not None:
of = fitting.LinearLSQFitter()
yarr = np.arange(len(oscan))
oscan = of(model, yarr, oscan)
oscan = oscan(yarr)
if overscan_axis == 1:
oscan = np.reshape(oscan, (oscan.size, 1))
else:
oscan = np.reshape(oscan, (1, oscan.size))
else:
if overscan_axis == 1:
oscan = np.reshape(oscan, oscan.shape + (1,))
else:
oscan = np.reshape(oscan, (1,) + oscan.shape)
subtracted = ccd.copy()
# subtract the overscan
subtracted.data = ccd.data - oscan
return subtracted
|
9d5d8333949f77e86000f836051c92122b96c87b
| 18,368 |
from datetime import datetime
def read_raw(omega):
"""Read the raw temperature, humidity and dewpoint values from an OMEGA iServer.
Parameters
----------
omega : :class:`msl.equipment.record_types.EquipmentRecord`
The Equipment Record of an OMEGA iServer.
Returns
-------
:class:`str`
The serial number of the OMEGA iServer.
:class:`dict`
The data.
"""
nprobes = omega.connection.properties.get('nprobes', 1)
nbytes = omega.connection.properties.get('nbytes')
error = None
try:
cxn = omega.connect()
thd = cxn.temperature_humidity_dewpoint(probe=1, nbytes=nbytes)
if nprobes == 2:
thd += cxn.temperature_humidity_dewpoint(probe=2, nbytes=nbytes)
cxn.disconnect()
except Exception as e:
error = str(e)
thd = [None] * (nprobes * 3)
now_iso = datetime.now().replace(microsecond=0).isoformat(sep='T')
data = {
'error': error,
'alias': omega.alias,
'datetime': now_iso,
'report_number': None,
}
if len(thd) == 3:
data.update({
'temperature': thd[0], 'humidity': thd[1], 'dewpoint': thd[2]
})
else:
data.update({
'temperature1': thd[0], 'humidity1': thd[1], 'dewpoint1': thd[2],
'temperature2': thd[3], 'humidity2': thd[4], 'dewpoint2': thd[5]
})
return omega.serial, data
|
105e07d26774288319459ebdc485d75c3a909212
| 18,369 |
import math
def get_spell_slots(pcClass, level):
"""Return a list containing the available spell slots for each spell level."""
spell_slots = []
if pcClass.casefold() == "Magic-User".casefold():
highest_spell_level = min(math.ceil(level / 2), 9)
# MU_SPELL_SLOTS[level - 1] gives first level spell slots for the given character
# level. The spell slots for subsequent spell levels move two steps down the
# list each time. So we move two steps down the list for each spell level we
# need beyond the first by subtracting 2 * i from the index.
for i in range(highest_spell_level):
spell_slots.append(MU_SPELL_SLOTS[(level - 1) - (2 * i)])
if pcClass.casefold() == "Cleric".casefold():
# Cleric spell slots are a little strange: they have access to level 1 spells
# if they're 3rd level or lower. Otherwise, they use the same progression as
# magic-users (except that Clerics' highest spell level is 7, not 9).
highest_spell_level = 1 if level <= 3 else min(math.ceil(level / 2), 7)
# Here's the really painful bit. Cleric spell slots ALMOST follow the nice easy
# Magic-User pattern of moving two steps down each time you go up a spell level.
# Almost.
# They actually move 3 steps down the first time (from spell level 1 to spell
# level 2), and then a nice even 2 steps down for every spell level after that.
# Special cases, UGH.
for i in range(highest_spell_level):
if i <= 1:
spell_slots.append(CLERIC_SPELL_SLOTS[(level - 1) - (3 * i)])
else:
spell_slots.append(CLERIC_SPELL_SLOTS[(level - 1) - (2 * i)])
# Sigh. Level 20 is a special case that doesn't follow any handy pattern that I
# could find.
if level == 20:
spell_slots = [8, 7, 7, 6, 5, 5, 4]
return spell_slots
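# Usage sketch (assumes the module-level MU_SPELL_SLOTS and CLERIC_SPELL_SLOTS
# progression tables referenced above are defined; their contents are game
# data and are not reproduced here).
print(get_spell_slots("Magic-User", 5))  # slots for spell levels 1-3
print(get_spell_slots("Cleric", 20))     # hard-coded case: [8, 7, 7, 6, 5, 5, 4]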
|
792110a79eb00965ea72e067e47c8eff2be4c293
| 18,370 |
from datetime import datetime, timedelta
def determine_dates_to_query_on_matomo(dates_in_database):
"""
Determines which dates need to be queried on Matomo to update the dataset.
"""
# determines which dates are missing from the database and could be queried on Matomo
# NOTE: start date was set to 2020-05-01 as May is when the portal started to be live
start_date = datetime.strptime('2020-05-01', '%Y-%m-%d').date()
end_date = (datetime.today() - timedelta(1)).date()
delta = timedelta(days=1)
dates_to_process = []
while start_date <= end_date:
if str(start_date) not in dates_in_database:
dates_to_process.append(str(start_date))
start_date += delta
return dates_to_process
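# Usage sketch: dates already stored are passed as ISO-format strings; the
# call returns every missing date from 2020-05-01 up to yesterday.
already_stored = {"2020-05-01", "2020-05-02"}
missing_dates = determine_dates_to_query_on_matomo(already_stored)
print(missing_dates[:3])  # ['2020-05-03', '2020-05-04', '2020-05-05']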
|
40db63fb7ff339d5c306df37cf0f4b1765b91f90
| 18,371 |
from datetime import datetime
import json
import subprocess as sp
import tempfile
def handleMsg(msgJ):
"""Process the message in msgJ.
Parameters:
msgJ: dict
Dictionary with command sent from client
Returns:
string
JSON string with command response
Commands are of the form:
{'cmd' : 'getCCC', 'param0': 'param0val', ...}
Response is a string of the form (note that JSON is picky that keys
    and strings should be enclosed in double quotes):
'{"cmd" : "getCmd", "cmd" : "<response>"}'
{'cmd':'getHello'} -> {"cmd":"getHello", "hello": "world"}
{'cmd':'getSegyHdrs', filename: f} ->
{"cmd":"getSegyHdrs", "segyhdrs":
{ns:nsamps, dt:dt: hdrs:[hdr1, hdr2...]}}
FIXME FIXME - this currently returns "segy", not "ensemble" as the key
WARNING - you must call getSegyHdrs first
flo and fhi are optional. If they are not present, no filtering
{'cmd':'getEnsemble', filename:f, ensemble:n, [flo:flo, fhi: fhi]} ->
{"cmd":"getEnsemble", "segy":
{ns:nsamps, dt:dt: traces:[trc1, trc2...]}}
"""
print('msgJ: {}'.format(msgJ))
if msgJ['cmd'].lower() == 'getsegyhdrs':
filename = msgJ['filename']
print('getting segyhdr >{}<, filename: {}'.format(msgJ, filename))
        t0 = datetime.now()
if segy.filename != filename:
# new file - open it
try:
s = _read_su(filename, endian='>', headonly=True)
segy.filename = filename
segy.segyfile = s
except:
ret = json.dumps({"cmd":"readSegy", "error": "Error reading file {}".format(filename)})
return ret
print("ntrcs = {}".format(len(segy.segyfile.traces)))
hdrs = [segy.getTrc(i, headonly=True) for i in range(len(segy.segyfile.traces))]
nsamps = segy.segyfile.traces[0].header.number_of_samples_in_this_trace
dt = segy.segyfile.traces[0].header.sample_interval_in_ms_for_this_trace/(1000.*1000.)
segy.nsamps = nsamps
segy.dt = dt
segy.hdrs = hdrs
ret = json.dumps({"cmd": "getSegyHdrs",
"segyHdrs" : json.dumps({"dt":dt, "ns":nsamps,
"filename": segy.filename,
"hdrs":hdrs})})
return ret
if msgJ['cmd'].lower() == 'getnmo':
# assumes getSegyHdrs called already. needed?
print('nmo getting ens', msgJ)
if segy.segyfile is None:
ret = json.dumps({"cmd":"getNMO", "error": "Error doing NMO: call getSegyHdrs first."})
return ret
try:
vnmo = msgJ['vnmo']
tnmo = msgJ['tnmo']
print('got nmo', vnmo, tnmo)
except:
vnmo = 'vnmo=2000'
tnmo = 'tnmo=0'
try:
ens = int(msgJ['ensemble'])
try:
# open a tmp file
tmpf = tempfile.NamedTemporaryFile(delete=False) # output
print('opened', tmpf.name)
# and the segy input file
with open(msgJ['filename'], 'rb') as sf: # input
# and do the nmo
p1 = sp.Popen(['suwind', 'key=cdp', 'min={}'.format(ens), 'max={}'.format(ens)], stdin=sf, stdout=sp.PIPE)
p2 = sp.Popen(['sugain', "tpow=1.5"], stdin=p1.stdout, stdout=sp.PIPE)
p3 = sp.Popen(['sunmo', vnmo, tnmo], stdin=p2.stdout, stdout=tmpf)
print('p2 ok')
p1.stdout.close()
p2.stdout.close()
out,err = p3.communicate()
print('suwind/sugain/nmo', out, err)
#print('nmo call', ret)
tmpf.close()
# nmo segy file
nsegy = Segy()
nsegy.filename = tmpf.name
nsegy.segyfile = _read_su(tmpf.name, headonly=False)
nmontrcs = len(nsegy.segyfile.traces)
#print('nmo ntrcs', nmontrcs)
nmotrcs = [nsegy.getTrc(i, headonly=False, trctype='seismic') for i in range(nmontrcs)]
# delete the tmp file
#os.unlink(tmpf.name)
print('nmo trcs', len(nmotrcs))
except:
print('err nmo', ens)
ret = json.dumps({"cmd":"getNMO", "error": "Error performing NMO"})
return ret
ntrc = len(nmotrcs)
except:
print('err ens', msgJ)
ret = json.dumps({"cmd":"getNMO", "error": "Error reading ensemble number"})
return ret
print("ens = {} ntrc={}".format(ens, len(nmotrcs)))
# dt/nsamps could change from the original due to decimation
dt = nmotrcs[0]["dt"]
nsamps = nmotrcs[0]["nsamps"]
print('dt, nsamps', dt, nsamps)
#print(json.dumps(traces[0]))
ret = json.dumps({"cmd": "getNMO",
"NMO" : json.dumps({"dt":dt, "ns":nsamps,
"filename": nsegy.filename,
"traces":nmotrcs})})
return ret
if msgJ['cmd'].lower() == 'getvelan':
if segy.segyfile is None:
ret = json.dumps({"cmd":"getEnsemble", "error": "Error reading ensemble"})
return ret
try:
ens = int(msgJ['ensemble'])
print('in velan', ens)
except:
print('no ens')
return json.dumps({"cmd":"getVelan", "error": "Error reading ensemble number"})
try:
dv = msgJ['dv']
fv = msgJ['fv']
nv = msgJ['nv']
except:
fv=1500
dv=100
nv=50
dvstr = "dv={}".format(dv)
fvstr = "fv={}".format(fv)
nvstr = "nv={}".format(nv)
tmpf = tempfile.NamedTemporaryFile(delete=False) # output
with open(segy.filename, 'rb') as sf:# input
#tmpfname = tmpf.name
p1 = sp.Popen(['suwind', 'key=cdp', 'min={}'.format(ens), 'max={}'.format(ens)], stdin=sf, stdout=sp.PIPE)
p2 = sp.Popen(['sugain', "tpow=1.5"], stdin=p1.stdout, stdout=sp.PIPE)
p3 = sp.Popen(['suvelan', dvstr, fvstr, nvstr], stdin=p2.stdout, stdout=tmpf)
print('p3 ok')
p1.stdout.close()
p2.stdout.close()
out,err = p3.communicate()
print('suwind/sugain/velan', out, err)
#ret = sp.call(['suvelan', dvstr, fvstr, nvstr], stdin=sf, stdout=tmpf)
#print('wrote suvelan file', ret, tmpf.name)
tmpf.close()
vsegy = Segy()
vsegy.filename=tmpf.name
vsegy.segyfile = _read_su(tmpf.name, headonly=False)
vtrcs = [vsegy.getTrc(i, headonly=False, trctype='velocity', v0=fv, dv=dv) for i in range(len(vsegy.segyfile.traces)) if vsegy.segyfile.traces[i].header.ensemble_number == ens]
print('nvel trcs', len(vtrcs))
dt = vtrcs[0]["dt"]
nsamps = vtrcs[0]["nsamps"]
print('dt, nsamps', dt, nsamps)
#print(json.dumps(traces[0]))
ret = json.dumps({"cmd": "getVelan",
"velan" : json.dumps({"dt":dt, "ns":nsamps, "fv":fv,
"dv":dv, "nv":nv,
"filename": vsegy.filename,
"traces":vtrcs})})
#ret = json.dumps({"cmd": "velan", "velan": "test"})
return ret
if msgJ["cmd"].lower() == "gethello":
ret = json.dumps({"cmd": "hello", "hello": "world"})
return ret
|
020050ddfd82823decd6e2ecedcd639c1fee3922
| 18,372 |
def calc_total_energy(electron_energy, atomic_distance, energy0):
"""
Calculates the total energy of H2 molecule from electron_energy by
adding proton-proton Coulomb energy and defining the zero energy
energy0. The formula:
E = E_el + E_p - E_0
    where E is the total energy, E_el is the electronic energy,
    E_p = 1 / R is the proton-proton Coulomb energy with R the atomic
    distance, and E_0 is the chosen zero energy.
:param electron_energy: list of energies of H2 molecule without
proton-proton Coulomb energy
:param atomic_distance: list of distances between two H atoms
of H2 molecule
:param energy0: The energy that we take as zero energy
:return: list of total energy of H2 molecule in MJ mol^{-1}
"""
total_energy = [0]*len(electron_energy)
for dot in range(len(electron_energy)):
# proton-proton Coulomb energy
proton_energy = proton_proton(atomic_distance[dot])
total_energy_hartree = electron_energy[dot] + proton_energy - energy0
total_energy[dot] = hartree_to_MJmol(total_energy_hartree)
return total_energy
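# Usage sketch (assumes the module-level helpers proton_proton() and
# hartree_to_MJmol() used above are available; the numbers are illustrative).
electron_energies = [-1.8, -1.9, -1.85]   # Hartree, without p-p repulsion
distances = [1.2, 1.4, 1.6]               # atomic units
print(calc_total_energy(electron_energies, distances, energy0=-1.0))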
|
3a948dc26e7147961e9c7677ef2b8b1d8f47d0ab
| 18,373 |
def k8s_stats_response():
"""
Returns K8s /stats/summary endpoint output from microk8s on Jetson Nano.
"""
with open("tests/resources/k8s_response.json", "r") as response_file:
response = response_file.read()
return response
|
68413108eeea6bdd80a782b962f3a5c97e1a4b73
| 18,374 |
def display_credentials():
"""
Function to display saved credentials.
"""
return Credentials.display_credential()
|
0cfb2e7529bd46ae3a05e21aeec25761c062e04b
| 18,375 |
from typing import Optional
from typing import Dict
def evaluate_absence_of_narrow_ranges(
piece: Piece, min_size: int = 9,
penalties: Optional[Dict[int, float]] = None
) -> float:
"""
Evaluate melodic fluency based on absence of narrow ranges.
:param piece:
`Piece` instance
:param min_size:
minimum size of narrow range (in line elements)
:param penalties:
mapping from width of a range (in scale degrees) to penalty
applicable to ranges of not greater width
:return:
        count of narrow ranges weighted by their width, multiplied by -1
"""
penalties = penalties or {2: 1, 3: 0.5}
pitches = [x.scale_element.position_in_degrees for x in piece.counterpoint]
rolling_mins = rolling_aggregate(pitches, min, min_size)[min_size-1:]
rolling_maxs = rolling_aggregate(pitches, max, min_size)[min_size-1:]
borders = zip(rolling_mins, rolling_maxs)
score = 0
for lower_border, upper_border in borders:
range_width = upper_border - lower_border
curr_penalties = [v for k, v in penalties.items() if k >= range_width]
penalty = max(curr_penalties) if curr_penalties else 0
score -= penalty
return score
|
4b487f1f749f31d33852d928b1b56d1489336827
| 18,376 |
from qalgebra.core.scalar_algebra import (
One,
Scalar,
ScalarTimes,
ScalarValue,
Zero,
)
from typing import OrderedDict
def collect_scalar_summands(cls, ops, kwargs):
"""Collect :class:`.ScalarValue` and :class:`.ScalarExpression` summands.
Example::
>>> srepr(collect_scalar_summands(Scalar, (1, 2, 3), {}))
'ScalarValue(6)'
>>> collect_scalar_summands(Scalar, (1, 1, -1), {})
One
>>> collect_scalar_summands(Scalar, (1, -1), {})
Zero
>>> Psi = KetSymbol("Psi", hs=0)
>>> Phi = KetSymbol("Phi", hs=0)
>>> braket = BraKet.create(Psi, Phi)
>>> collect_scalar_summands(Scalar, (1, braket, -1), {})
<Psi|Phi>^(0)
>>> collect_scalar_summands(Scalar, (1, 2 * braket, 2, 2 * braket), {})
((3, 4 * <Psi|Phi>^(0)), {})
>>> collect_scalar_summands(Scalar, (2 * braket, -braket, -braket), {})
Zero
"""
# This routine is required because there is no
# "ScalarTimesQuantumExpression" for scalars: we have to extract
    # coefficients from ScalarTimes instead
a_0 = Zero
coeff_map = OrderedDict()
for op in ops:
if isinstance(op, ScalarValue) or isinstance(op, Scalar._val_types):
a_0 += op
continue
elif isinstance(op, ScalarTimes):
if isinstance(op.operands[0], ScalarValue):
coeff = op.operands[0]
term = op.operands[1]
for sub_op in op.operands[2:]:
term *= sub_op
else:
coeff, term = One, op
else:
coeff, term = One, op
if term in coeff_map:
coeff_map[term] += coeff
else:
coeff_map[term] = coeff
if a_0 == Zero:
fops = []
else:
fops = [a_0]
for (term, coeff) in coeff_map.items():
op = coeff * term
if not op.is_zero:
fops.append(op)
if len(fops) == 0:
return cls._zero
elif len(fops) == 1:
return fops[0]
else:
return tuple(fops), kwargs
|
a6b7ba05db9e9d6434f217bcc7a67f2b6f7ba22b
| 18,377 |
import os
import gzip
import dill
def sge_submit(
tasks, label, tmpdir, options="-q hep.q", dryrun=False, quiet=False,
sleep=5, request_resubmission_options=True, return_files=False,
dill_kw={"recurse": False},
):
"""
Submit jobs to an SGE batch system. Return a list of the results of each
job (i.e. the return values of the function calls)
Parameters
----------
tasks : list
        A list of dictionaries with the keys: task, args and kwargs. Each
element is run on a node as task(*args, **kwargs).
label : str
Label given to the qsub submission script through -N.
tmpdir : str
Path to temporary directory (doesn't have to exist) where pysge stores
        job information. Each call will have a unique identifier in the form
tpd_YYYYMMDD_hhmmss_xxxxxxxx. Within this directory exists all tasks in
separate directories with a dilled file, stdout and stderr for that
particular job.
options : str (default = "-q hep.q")
Additional options to pass to the qsub command. Take care since the
following options are already in use: -wd, -V, -e, -o and -t.
dryrun : bool (default = False)
Create directories and files but don't submit the jobs.
quiet : bool (default = False)
Don't print tqdm progress bars. Other prints are controlled by logging.
sleep : float (default = 5)
Minimum time between queries to the batch system.
request_resubmission_options : bool (default = True)
When a job fails the master process will expect an stdin from the user
to alter the submission options (e.g. to increase walltime or memory
requested). If False it will use the original options.
return_files : bool (default = False)
Instead of opening the output files and loading them into python, just
send the paths to the output files and let the user deal with them.
dill_kw : dict
Kwargs to pass to dill.dump
"""
if not _validate_tasks(tasks):
logger.error(
"Invalid tasks. Ensure tasks=[{'task': .., 'args': [..], "
"'kwargs': {..}}, ...], where 'task' is callable."
)
return []
area = WorkingArea(os.path.abspath(tmpdir))
submitter = SGETaskSubmitter(" ".join(['-N {}'.format(label), options]))
monitor = JobMonitor(submitter)
results = []
area.create_areas(tasks, quiet=quiet, dill_kw=dill_kw)
try:
submitter.submit_tasks(area.task_paths, dryrun=dryrun, quiet=quiet)
if not dryrun:
results = monitor.monitor_jobs(
sleep=sleep, request_user_input=request_resubmission_options,
)
except KeyboardInterrupt as e:
submitter.killall()
if return_files:
return results
results_not_files = []
for path in results:
with gzip.open(path, 'rb') as f:
results_not_files.append(dill.load(f))
return results_not_files
|
dd7658d0b16a75a303bdb8124809bdd3f74a4ad5
| 18,378 |
def _is_domain_interval(val):
""" Check if a value is representing a valid domain interval
Args:
val: Value to check
Returns:
True if value is a tuple representing an interval
"""
if not isinstance(val, tuple):
return False
if not (is_int(val[0]) and is_int(val[1]) and (val[1] >= val[0])):
return False
vl = len(val)
if vl == 2:
return True
if vl == 3:
return val[2] == _HOLE_MARKER
return False
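# Quick illustration of the accepted shapes (derived from the checks above;
# is_int and _HOLE_MARKER are the module-level helpers referenced in the code):
print(_is_domain_interval((1, 5)))                # True: ints, upper >= lower
print(_is_domain_interval((5, 1)))                # False: upper < lower
print(_is_domain_interval([1, 5]))                # False: not a tuple
print(_is_domain_interval((1, 5, _HOLE_MARKER)))  # True: 3-tuple with hole marker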
|
3de16ddc26429be14ab84825f659dcf05f89f1f3
| 18,379 |
def rndcaps(n):
"""
Generates a string of random capital letters.
Arguments:
n: Length of the output string.
Returns:
A string of n random capital letters.
"""
return "".join([choice(_CAPS) for c in range(n)])
|
0661de89cc1abbbc678f7764f90674f3e5fb7282
| 18,380 |
def cutByWords(text, chunkSize, overlap, lastProp):
"""
Cuts the text into equally sized chunks, where the segment size is measured by counts of words,
    with an option for an amount of overlap between chunks and a minimum
    proportion threshold for the last chunk.
Args:
text: The string with the contents of the file.
chunkSize: The size of the chunk, in words.
overlap: The number of words to overlap between chunks.
lastProp: The minimum proportional size that the last chunk has to be.
Returns:
        A list of strings that the text has been cut into.
"""
chunkList = [] # The list of the chunks (a.k.a a list of list of strings)
chunkSoFar = Queue() # The rolling window representing the (potential) chunk
currChunkSize = 0 # Index keeping track of whether or not it's time to make a chunk out of the window
tillNextChunk = chunkSize - overlap # The distance between the starts of chunks
splitText = splitKeepWhitespace(text)
# Create list of chunks (chunks are lists of words and whitespace) by using a queue as a rolling window
for token in splitText:
if token in WHITESPACE:
chunkSoFar.put(token)
else:
currChunkSize += 1
if currChunkSize > chunkSize:
chunkList.append(list(chunkSoFar.queue))
stripLeadingWords(wordQueue=chunkSoFar, numWords=tillNextChunk)
currChunkSize -= tillNextChunk
chunkSoFar.put(token)
# Making sure the last chunk is of a sufficient proportion
lastChunk = list(chunkSoFar.queue) # Grab the final (partial) chunk
if (float(countWords(lastChunk)) / chunkSize) < lastProp: # If the proportion of the last chunk is too low
if len(chunkList)==0:
chunkList.extend(lastChunk)
else:
chunkList[-1].extend(lastChunk)
else:
chunkList.append(lastChunk)
# Make the list of lists of strings into a list of strings
countSubList = 0
    stringList = []
    for subList in chunkList:
        stringList.extend([''.join(subList)])
        if isinstance(subList, list):
            countSubList += 1
    # If chunkList held no sub-lists, its items are plain strings: join them directly
    if countSubList == 0:
stringList = []
stringList.extend([''.join(chunkList)])
return stringList
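# Usage sketch (splitKeepWhitespace, countWords, stripLeadingWords, WHITESPACE
# and Queue come from the surrounding module; the text below is illustrative).
sample = "one two three four five six seven eight nine ten"
for chunk in cutByWords(sample, chunkSize=4, overlap=1, lastProp=0.5):
    print(repr(chunk))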
|
0767eeab983f0d21a9fa14527a3962405019e110
| 18,381 |
def dsu_sort2(list, index, reverse=False):
"""
This function sorts only based on the primary element, not on secondary elements in case of equality.
"""
    # Decorate: pair each element with its sort key and original position so
    # ties are resolved by position and the elements themselves are never compared.
    for i, e in enumerate(list):
        list[i] = (e[index], i, e)
    if reverse:
        list.sort(reverse=True)
    else:
        list.sort()
    # Undecorate: recover the original elements.
    for i, e in enumerate(list):
        list[i] = e[2]
return list
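# Usage sketch: sort rows by their first column only; equal keys keep their
# input order and the row contents are never compared.
rows = [(3, "c"), (1, "a"), (3, "b")]
print(dsu_sort2(rows, 0))  # [(1, 'a'), (3, 'c'), (3, 'b')]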
|
3fb614ac732eb790caf8f7d209c4e14022b8352a
| 18,382 |
import functools
def roca_view(full, partial, **defaults):
"""
Render partal for XHR requests and full template otherwise
"""
templ = defaults.pop('template_func', template)
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if request.is_xhr:
tpl_name = partial
else:
tpl_name = full
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return templ(tpl_name, **tplvars)
elif result is None:
return templ(tpl_name, defaults)
return result
return wrapper
return decorator
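# Usage sketch for a Bottle-style handler (assumes 'page.tpl' and
# 'page_partial.tpl' templates exist and that `request`/`template` are the
# Bottle objects already imported by the surrounding module).
@roca_view('page.tpl', 'page_partial.tpl', title='Default title')
def show_page():
    return {'body': 'Hello'}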
|
eed45a5fc201667744cfe213ec61f5fba546d70b
| 18,383 |
async def _shuffle(s, workers, dfs_nparts, dfs_parts, column):
"""
Parameters
----------
s: dict
Worker session state
workers: set
Set of ranks of all the participants
dfs_nparts: list of dict
        List of dicts that for each worker rank specifies the
number of partitions that worker has. If the worker doesn't
have any partitions, it is excluded from the dict.
E.g. `dfs_nparts[0][1]` is how many partitions of the "left"
dataframe worker 1 has.
dfs_parts: list of lists of Dataframes
List of inputs, which in this case are two dataframe lists.
column : label or list, or array-like
The bases of the rearrangement.
"""
assert s["rank"] in workers
df_parts = dfs_parts[0]
    # Trimming such that all participating workers get a rank within 0..len(workers)
trim_map = {}
for i in range(s["nworkers"]):
if i in workers:
trim_map[i] = len(trim_map)
rank = trim_map[s["rank"]]
eps = {trim_map[i]: s["eps"][trim_map[i]] for i in workers if i != s["rank"]}
df = df_concat(df_parts)
return await shuffle(len(workers), rank, eps, df, column)
|
7b9d8e7dc6687ee5fb661bb0912c6288f3473af9
| 18,384 |
def play_game(board:GoBoard):
"""
    Run a simulation game to the end from the current board
"""
while True:
# play a random move for the current player
color = board.current_player
move = GoBoardUtil.generate_random_move(board,color)
board.play_move(move, color)
# current player is passing
if move is None:
break
# get winner
winner = GoBoardUtil.opponent(color)
return winner
|
3fe2d050e9835bdbabbc81b999edbce7fa0c96d1
| 18,385 |
def logical_array(ar):
"""Convert ndarray (int, float, bool) to array of 1 and 0's"""
out = ar.copy()
out[out!=0] = 1
return out
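# Usage sketch:
import numpy as np
print(logical_array(np.array([0.0, 2.5, -3.0, 0.0])))  # [0. 1. 1. 0.]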
|
74d96d519929ed7f5ddfd92b0fbcef4741a38359
| 18,386 |
from datetime import datetime
import requests
def otp_route(
in_gdf,
mode,
date_time = datetime.now(),
trip_name = '',
):
"""
Return a GeoDataFrame with detailed trip information for the best option.
Parameters
----------
in_gdf : GeoDataFrame
        It should only contain two records: the first record is the origin and
        the second record is the destination. If more than two records are
        given, only the first two are considered.
mode : string
Indicates transport modes. Modes that can be used
include 'public_transport', 'car_in_traffic', 'car_free_flow',
'walk', 'cycle'
trip_name : string
gives the trip a name which is stored in the trip_name in output
GeoDataFrame.
date_time : a datetime object
Sets the start time of a trip. Only important if the mode is
transit or a subset of transit.
Returns
-------
GeoDataFrame
Has the structure
-``trip_name`` the name given as an input to the trip.
-``leg_id`` A counter for each trip leg
-``mode`` returns the mode for each trip leg
        -``from`` the shapely point data in WGS84 for the origin location
        -``from_name`` the interim stop id on the network or 'Origin'
        -``to`` the shapely point data in WGS84 for the destination location
-``to_name`` the interim stop id on the network or 'Destination'
-``route_id`` the route id for the trip leg if the mode is transit
-``trip_id`` the trip id for the trip leg if the mode is transit
-``distance`` Distance traveled in meters for the trip leg
-``duration`` Travel time for the trip leg in seconds
-``startTime`` time stamp for the start time of the trip leg
-``endTime`` time stamp for the end time of the trip leg
-``waitTime`` Wait time for the trip leg in seconds
        -``geometry`` The geometry of the trip leg as a shapely object in WGS84
"""
# The mode parameter is not validated by the Maps API
# Check here to prevent silent failures.
if mode not in list(cs.otp_modes.keys()):
raise ValueError("{0} is an invalid travel mode.".format(mode))
if in_gdf.crs.name != 'WGS 84':
        # Check that the coordinate system is WGS84
raise ValueError("Invalid coordinate system.")
if mode == 'public_transport' and not date_time:
date_time = datetime.now()
#get from and to location from locations_gdf
orig = in_gdf['geometry'].iat[0]
dest = in_gdf['geometry'].iat[-1]
orig_text = "{0}, {1}".format(orig.y, orig.x)
dest_text = "{0}, {1}".format(dest.y, dest.x)
t = date_time.strftime("%H:%M%p")
d = date_time.strftime("%m-%d-%Y")
#send query to api
url = 'http://localhost:8080/otp/routers/default/plan'
query = {
"fromPlace":orig_text,
"toPlace":dest_text,
"time":t,
"date":d,
"mode":cs.otp_modes[mode],
}
r = requests.get(url, params=query)
#check for request error
r.raise_for_status()
    # if the API reports an error, an empty GeoDataFrame is returned below
    if 'error' not in r.json():
        # convert the request output to a GeoDataFrame
df = pd.DataFrame(r.json()['plan']['itineraries'][0]['legs']).reset_index()
df = df.rename(columns={
'index': 'leg_id',
'mode': 'mode',
'routeId': 'route_id',
'tripId': 'trip_id',
'startTime': 'start_time',
'endTime': 'end_time',
'wait_time': 'waitTime',
})
df['geometry'] = df['legGeometry'].map(
lambda x: geom.LineString([(p['lng'], p['lat']) for p in decode_polyline(x['points'])])
)
df['from_name'] = df['from'].map(lambda x: x['stopId'] if 'stopId' in x else x['name'])
df['to_name'] = df['to'].map(lambda x: x['stopId'] if 'stopId' in x else x['name'])
df['from'] = df['from'].map(lambda x: geom.Point(x['lon'], x['lat']))
df['to'] = df['to'].map(lambda x: geom.Point(x['lon'], x['lat']))
df['start_time'] = df['start_time'].map(lambda x: datetime.fromtimestamp(x/1000))
df['end_time'] = df['end_time'].map(lambda x: datetime.fromtimestamp(x/1000))
#calculate wait time
df['wait_time'] = df['start_time'].shift(-1)
df['wait_time'] = df['wait_time']-df['end_time']
df['trip_name'] = trip_name
for column in cs.columns:
if column not in df.columns.values:
df[column] = ''
#reorder the fields
df = df[cs.columns].copy()
gdf = gpd.GeoDataFrame(df, crs = cs.WGS84)
else:
gdf = gpd.GeoDataFrame()
gdf = gdf[gdf['geometry'].notnull()].copy()
return gdf
|
d19ec8e46b1480697cf0c9c7a83f0a859651b344
| 18,387 |
import os
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
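# Usage sketch inside a setup.py (the package name 'mypackage' is
# illustrative): collects files that live in the package tree but are not
# inside any sub-package, e.g. templates or static assets.
from setuptools import setup, find_packages
setup(
    name='mypackage',
    packages=find_packages(),
    package_data=get_package_data('mypackage'),
)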
|
d64cec9a1740347ba0c1a6197f0966a27ab2c0c4
| 18,388 |
def tall_clutter(files, config,
clutter_thresh_min=0.0002,
clutter_thresh_max=0.25, radius=1,
max_height=2000., write_radar=True,
out_file=None, use_dask=False):
"""
Wind Farm Clutter Calculation
Parameters
----------
files : list
List of radar files used for the clutter calculation.
config : str
String representing the configuration for the radar.
Such possible configurations are listed in default_config.py
Other Parameters
----------------
clutter_thresh_min : float
        Threshold value above which clutter values are considered to be
        clutter, as long as they are also below clutter_thresh_max.
    clutter_thresh_max : float
        Threshold value below which clutter values are considered to be
        clutter, as long as they are also above clutter_thresh_min.
radius : int
Radius of the area surrounding the clutter gate that will
be also flagged as clutter.
max_height: float
Maximum height above the radar to mark a gate as clutter.
write_radar : bool
        Whether or not to write the clutter radar as a netCDF file.
Default is True.
out_file : string
String of location and filename to write the radar object too,
if write_radar is True.
use_dask : bool
        Use dask instead of running stats for the calculation. This will
        reduce run time.
Returns
-------
clutter_radar : Radar
Radar object with the clutter field that was calculated.
This radar only has the clutter field, but maintains all
other radar specifications.
"""
field_names = get_field_names(config)
refl_field = field_names["reflectivity"]
vel_field = field_names["velocity"]
ncp_field = field_names["normalized_coherent_power"]
def get_reflect_array(file, first_shape):
""" Retrieves a reflectivity array for a radar volume. """
try:
radar = pyart.io.read(file, include_fields=[refl_field,
ncp_field, vel_field])
reflect_array = deepcopy(radar.fields[refl_field]['data'])
ncp = radar.fields[ncp_field]['data']
height = radar.gate_z["data"]
up_in_the_air = height > max_height
the_mask = np.logical_or.reduce(
(ncp < 0.9, reflect_array.mask, up_in_the_air))
reflect_array = np.ma.masked_where(the_mask, reflect_array)
del radar
if reflect_array.shape == first_shape:
return reflect_array.filled(fill_value=np.nan)
except(TypeError, OSError):
print(file + ' is corrupt...skipping!')
return np.nan*np.zeros(first_shape)
if use_dask is False:
run_stats = _RunningStats()
first_shape = 0
for file in files:
try:
radar = pyart.io.read(file)
reflect_array = radar.fields[refl_field]['data']
ncp = deepcopy(radar.fields[ncp_field]['data'])
height = radar.gate_z["data"]
reflect_array = np.ma.masked_where(
np.logical_or(height > max_height, ncp < 0.8),
reflect_array)
                if first_shape == 0:
                    first_shape = reflect_array.shape
                    clutter_radar = radar
                # Only accumulate volumes whose shape matches the first volume.
                if reflect_array.shape == first_shape:
                    run_stats.push(reflect_array)
del radar
except(TypeError, OSError):
print(file + ' is corrupt...skipping!')
continue
mean = run_stats.mean()
stdev = run_stats.standard_deviation()
clutter_values = stdev / mean
clutter_values = np.ma.masked_invalid(clutter_values)
clutter_values_no_mask = clutter_values.filled(
clutter_thresh_max + 1)
else:
cluster = LocalCluster(n_workers=20, processes=True)
client = Client(cluster)
first_shape = 0
i = 0
while first_shape == 0:
try:
radar = pyart.io.read(files[i])
reflect_array = radar.fields[refl_field]['data']
first_shape = reflect_array.shape
clutter_radar = radar
except(TypeError, OSError):
                print(files[i] + ' is corrupt...skipping!')
                i = i + 1
continue
arrays = [delayed(get_reflect_array)(file, first_shape)
for file in files]
array = [da.from_delayed(a, shape=first_shape, dtype=float)
for a in arrays]
array = da.stack(array, axis=0)
print('## Calculating mean in parallel...')
mean = np.array(da.nanmean(array, axis=0))
print('## Calculating standard deviation...')
count = np.array(da.sum(da.isfinite(array), axis=0))
stdev = np.array(da.nanstd(array, axis=0))
clutter_values = stdev / mean
clutter_values = np.ma.masked_invalid(clutter_values)
clutter_values = np.ma.masked_where(np.logical_or(
clutter_values.mask, count < 20), clutter_values)
# Masked arrays can suck
clutter_values_no_mask = clutter_values.filled(
(clutter_thresh_max + 1))
shape = clutter_values.shape
mask = np.ma.getmask(clutter_values)
is_clutters = np.argwhere(
np.logical_and.reduce((clutter_values_no_mask > clutter_thresh_min,
clutter_values_no_mask < clutter_thresh_max,
)))
clutter_array = _clutter_marker(is_clutters, shape, mask, radius)
clutter_radar.fields.clear()
clutter_array = clutter_array.filled(0)
clutter_dict = _clutter_to_dict(clutter_array)
clutter_value_dict = _clutter_to_dict(clutter_values)
clutter_value_dict["long_name"] = "Clutter value (std. dev/mean Z)"
clutter_radar.add_field('ground_clutter', clutter_dict,
replace_existing=True)
clutter_radar.add_field('clutter_value', clutter_value_dict,
replace_existing=True)
if write_radar is True:
pyart.io.write_cfradial(out_file, clutter_radar)
del clutter_radar
return
|
7eb26fbca4977e35f82f844f74d17269d7f80989
| 18,389 |
def serialize_bundle7(source_eid, destination_eid, payload,
report_to_eid=None, crc_type_primary=CRCType.CRC32,
creation_timestamp=None, sequence_number=None,
lifetime=300, flags=BlockProcFlag.NONE,
fragment_offset=None, total_adu_length=None,
hop_limit=30, hop_count=0, bundle_age=0,
previous_node_eid=None,
crc_type_canonical=CRCType.CRC16):
"""All-in-one function to encode a payload from a source EID
    to a destination EID as a BPbis bundle.
See create_bundle7 for a description of options."""
return bytes(create_bundle7(
source_eid, destination_eid, payload,
report_to_eid, crc_type_primary,
creation_timestamp, sequence_number,
lifetime, flags,
fragment_offset, total_adu_length,
hop_limit, hop_count, bundle_age,
previous_node_eid,
crc_type_canonical
))
|
712a2de8814e5a3bf7143b27aa2d0a6f360e7db4
| 18,390 |
def _get_chinese_week(localtime):
"""获取星期和提醒"""
chinese_week = ["一", "二", "三", "四", "五", "六", "日"]
tm_w_day = localtime.tm_wday
extra_msg = "<green>当前正是周末啦~</green>" if tm_w_day in [5, 6] else "Other"
if extra_msg == "Other":
go_week = 4 - tm_w_day
extra_msg = f"<yellow>还有 {go_week} 天周末</yellow>" if go_week != 0 else "<blue>明天就是周末啦~坚持摸鱼~</blue>"
return chinese_week[tm_w_day], extra_msg
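# Usage sketch: pass a time.struct_time, e.g. the current local time.
import time
week_name, reminder = _get_chinese_week(time.localtime())
print(week_name, reminder)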
|
0a66bcf741c0d2e3cc9a238b5cb879c89333cc6b
| 18,391 |
def resnext101_32x16d_swsl(cfg, progress=True, **kwargs):
"""Constructs a semi-weakly supervised ResNeXt-101 32x16 model pre-trained on 1B weakly supervised
image dataset and finetuned on ImageNet.
`"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
Args:
progress (bool): If True, displays a progress bar of the download to stderr.
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 16
return _resnext(semi_weakly_supervised_model_urls['resnext101_32x16d'], Bottleneck,
[3, 4, 23, 3], cfg.pretrained, progress, **kwargs)
|
8c99c076278adfacd7764db50824a196a29341e5
| 18,392 |
def leaderboard(players=None, N=DEFAULTN, filename="leaderboard.txt"):
""" Create a leaderboard, and optionally save it to a file """
logger.info("Generating a leaderboard for players: %r, N=%d", players, N)
ratings, allgames, players = get_ratings(players, N)
board, table = make_leaderboard(ratings, allgames, players)
    print(table)
if filename:
logger.info("Saving leaderboard to file: %s", filename)
with open(filename,"w") as f:
f.write(table)
f.write('\n')
return board, table
|
e5ae7dcd1fd3c54e008b8e472d36b0af0de29463
| 18,393 |
def m_college_type(seq):
"""
    Determine the type of a school from its description and the degree level.
    If the school description contains "985":
        :param seq: ["985,211工程院校", "本科"]
        :return: "985,211工程院校"
    If the description contains "211" but not "985":
        :param seq: ["211工程院校", "硕士"]
        :return: "211工程院校"
    Otherwise, if the degree is 博士 (PhD), 硕士 (Master) or 本科 (Bachelor):
        :param seq: ["****", "硕士"]
        :return: "本科"
    Otherwise (e.g. a vocational college degree):
        :param seq: ["****", "专科"]
        :return: "专科"
"""
if "985" in seq[0]:
tmp = "985,211工程院校"
return tmp
elif "211" in seq[0] and "985" not in seq[0]:
tmp = "211工程院校"
return tmp
else:
if seq[1] in ["博士", "硕士", "本科"]:
tmp = "本科"
return tmp
else:
tmp = "专科"
return tmp
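# Usage sketch: the first element describes the school, the second the degree.
print(m_college_type(["985,211工程院校", "本科"]))  # -> "985,211工程院校"
print(m_college_type(["普通本科院校", "硕士"]))      # -> "本科"
print(m_college_type(["某职业技术学院", "专科"]))    # -> "专科"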
|
bf72f60c51a67dd3e18a7dd1957bc2beb4f933fd
| 18,394 |
from owslib.wcs import WebCoverageService
from lxml import etree
def get_raster_wcs(coordinates, geographic=True, layer=None):
"""Return a subset of a raster image from the local GeoServer via WCS 2.0.1 protocol.
    For geographic rasters, subsetting is based on WGS84 (Long, Lat) boundaries. If not geographic, subsetting is based
    on projected coordinate system (Easting, Northing) boundaries.
Parameters
----------
coordinates : sequence
Geographic coordinates of the bounding box (left, down, right, up)
geographic : bool
If True, uses "Long" and "Lat" in WCS call. Otherwise uses "E" and "N".
layer : str
Layer name of raster exposed on GeoServer instance. E.g. 'public:CEC_NALCMS_LandUse_2010'
Returns
-------
bytes
A GeoTIFF array.
"""
(left, down, right, up) = coordinates
if geographic:
x, y = 'Long', 'Lat'
else:
x, y = 'E', 'N'
wcs = WebCoverageService('http://boreas.ouranos.ca/geoserver/ows', version='2.0.1')
try:
resp = wcs.getCoverage(identifier=[layer, ],
format='image/tiff',
subsets=[(x, left, right), (y, down, up)])
except Exception as e:
raise Exception(e)
data = resp.read()
try:
etree.fromstring(data)
# The response is an XML file describing the server error.
raise ChildProcessError(data)
except etree.XMLSyntaxError:
# The response is the DEM array.
return data
|
5ae378077b3dbe480ef9fae37030d953e156936e
| 18,395 |
def del_local_name(*args):
"""
del_local_name(ea) -> bool
"""
return _ida_name.del_local_name(*args)
|
8db5674e8eb3917c21f189ebfa82525482ff712f
| 18,396 |
def solve_google_pdp(data):
"""Entry point of the program."""
# Create the routing index manager.
manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),
data['num_vehicles'], data['depot'])
# Create Routing Model.
routing = pywrapcp.RoutingModel(manager)
# Define cost of each arc.
def distance_callback(from_index, to_index):
"""Returns the manhattan distance between the two nodes."""
# Convert from routing variable Index to distance matrix NodeIndex.
from_node = manager.IndexToNode(from_index)
to_node = manager.IndexToNode(to_index)
return data['distance_matrix'][from_node][to_node]
transit_callback_index = routing.RegisterTransitCallback(distance_callback)
routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
# Add Distance constraint.
dimension_name = 'Distance'
routing.AddDimension(
transit_callback_index,
0, # no slack
3000, # vehicle maximum travel distance
True, # start cumul to zero
dimension_name)
distance_dimension = routing.GetDimensionOrDie(dimension_name)
# Define Transportation Requests.
for request in data['pickups_deliveries']:
pickup_index = manager.NodeToIndex(request[0])
delivery_index = manager.NodeToIndex(request[1])
routing.AddPickupAndDelivery(pickup_index, delivery_index)
routing.solver().Add(
routing.VehicleVar(pickup_index) == routing.VehicleVar(
delivery_index))
routing.solver().Add(
distance_dimension.CumulVar(pickup_index) <=
distance_dimension.CumulVar(delivery_index))
# Setting first solution heuristic.
search_parameters = pywrapcp.DefaultRoutingSearchParameters()
search_parameters.first_solution_strategy = (
routing_enums_pb2.FirstSolutionStrategy.PARALLEL_CHEAPEST_INSERTION)
search_parameters.local_search_metaheuristic = (
routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)
search_parameters.time_limit.seconds = 30
search_parameters.log_search = True # Turn on Log for Algorithms
assignment = routing.SolveWithParameters(search_parameters)
g_result = meta.Chromosome(_instance)
g_result.genes = []
if assignment:
total_distance = 0
for vehicle_id in range(data['num_vehicles']):
index = routing.Start(vehicle_id)
plan_output = 'Route for vehicle {}:\n'.format(vehicle_id)
route_distance = 0
while not routing.IsEnd(index):
plan_output += ' {} -> '.format(manager.IndexToNode(index))
previous_index = index
index = assignment.Value(routing.NextVar(index)) # Input Tasks
if manager.IndexToNode(index) != 0:
g_result.genes.append([manager.IndexToNode(index), vehicle_id + 1, False])
route_distance += routing.GetArcCostForVehicle(
previous_index, index, vehicle_id)
plan_output += '{}\n'.format(manager.IndexToNode(index))
plan_output += 'Distance of the route: {}m\n'.format(route_distance)
print(plan_output)
total_distance += route_distance
print('Total Distance of all routes: {}m'.format(total_distance))
meta.evaluate(g_result)
return g_result
|
2b269dbca031c0cf98f51c2e6a493ba7df71a1d6
| 18,397 |
from typing import Any
def fetch_net(args: Any,
num_tasks: int,
num_cls: int,
dropout: float = 0.3):
"""
    Create a neural network to train
"""
if "mnist" in args.dataset:
inp_chan = 1
pool = 2
l_size = 80
elif args.dataset == "mini_imagenet":
inp_chan = 3
pool = 3
l_size = 320
elif "cifar" in args.dataset:
inp_chan = 3
pool = 2
l_size = 320
else:
raise NotImplementedError
if args.model == "wrn16_4":
net = WideResNetMultiTask(depth=16, num_task=num_tasks,
num_cls=num_cls, widen_factor=4,
drop_rate=dropout, inp_channels=inp_chan)
elif args.model == "conv":
net = SmallConv(num_task=num_tasks, num_cls=num_cls,
channels=inp_chan, avg_pool=pool,
lin_size=l_size)
else:
raise ValueError("Invalid network")
if args.gpu:
net.cuda()
return net
|
cbc3abb5140060ef52a69e44883a743249b5cd5e
| 18,398 |
from typing import Dict
def are_models_specified(api_spec: Dict) -> bool:
"""
Checks if models have been specified in the API spec (cortex.yaml).
Args:
api_spec: API configuration.
"""
predictor_type = predictor_type_from_api_spec(api_spec)
if predictor_type == PythonPredictorType and api_spec["predictor"]["multi_model_reloading"]:
models = api_spec["predictor"]["multi_model_reloading"]
elif predictor_type != PythonPredictorType:
models = api_spec["predictor"]["models"]
else:
return False
return models is not None
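# Usage sketch (illustrative spec; assumes predictor_type_from_api_spec
# resolves this spec to a non-Python predictor type):
api_spec = {
    "predictor": {
        "type": "tensorflow",
        "models": {"path": "s3://bucket/model/"},
    },
}
print(are_models_specified(api_spec))  # True when "models" is set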
|
611ed794b45746e56bb8055a03251aa43d61d974
| 18,399 |