content (stringlengths 35-762k) | sha1 (stringlengths 40) | id (int64 0-3.66M)
---|---|---|
def calculate(over):
    """Returns the value of the first triangle number to have
    over the specified number of divisors"""
    triangle = 0  # index of the current triangle number
    count = 0     # running triangle number: 1 + 2 + ... + triangle
    while True:
        if num_divisors(count) > over:
            return count
        # advance to the next triangle number without re-summing the whole range
        triangle += 1
        count += triangle | e7391bea108261bb2b7abc64cbdd6ba6285deaae | 2,600 |
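The snippet above relies on an external num_divisors helper; a minimal trial-division sketch (an assumption, not the original implementation) that makes it runnable:

def num_divisors(n):
    """Count the divisors of n by trial division up to sqrt(n)."""
    if n < 1:
        return 0
    count = 0
    i = 1
    while i * i <= n:
        if n % i == 0:
            count += 1 if i * i == n else 2  # i and n // i, counted once when equal
        i += 1
    return count

# calculate(5) walks the triangle numbers 1, 3, 6, 10, 15, 21, 28 and returns 28,
# the first one with more than 5 divisors (1, 2, 4, 7, 14, 28).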
import numpy
from astropy.wcs import WCS  # assumed source of WCS
def convert_image_to_kernel(im: Image, oversampling, kernelwidth):
""" Convert an image to a griddata kernel
:param im: Image to be converted
:param oversampling: Oversampling of Image spatially
:param kernelwidth: Kernel width to be extracted
:return: numpy.ndarray[nchan, npol, oversampling, oversampling, kernelwidth, kernelwidth]
"""
naxis = len(im.shape)
assert numpy.max(numpy.abs(im.data)) > 0.0, "Image is empty"
nchan, npol, ny, nx = im.shape
    assert nx % oversampling == 0, "Image width nx must be a multiple of oversampling"
    assert ny % oversampling == 0, "Image height ny must be a multiple of oversampling"
    assert kernelwidth < nx and kernelwidth < ny, "Specified kernel width %d too large" % kernelwidth
assert im.wcs.wcs.ctype[0] == 'UU', 'Axis type %s inappropriate for construction of kernel' % im.wcs.wcs.ctype[0]
assert im.wcs.wcs.ctype[1] == 'VV', 'Axis type %s inappropriate for construction of kernel' % im.wcs.wcs.ctype[1]
newwcs = WCS(naxis=naxis + 2)
for axis in range(2):
newwcs.wcs.ctype[axis] = im.wcs.wcs.ctype[axis]
newwcs.wcs.crpix[axis] = kernelwidth // 2
newwcs.wcs.crval[axis] = 0.0
newwcs.wcs.cdelt[axis] = im.wcs.wcs.cdelt[axis] * oversampling
newwcs.wcs.ctype[axis + 2] = im.wcs.wcs.ctype[axis]
newwcs.wcs.crpix[axis + 2] = oversampling // 2
newwcs.wcs.crval[axis + 2] = 0.0
newwcs.wcs.cdelt[axis + 2] = im.wcs.wcs.cdelt[axis]
# Now do Stokes and Frequency
newwcs.wcs.ctype[axis + 4] = im.wcs.wcs.ctype[axis + 2]
newwcs.wcs.crpix[axis + 4] = im.wcs.wcs.crpix[axis + 2]
newwcs.wcs.crval[axis + 4] = im.wcs.wcs.crval[axis + 2]
newwcs.wcs.cdelt[axis + 4] = im.wcs.wcs.cdelt[axis + 2]
newdata_shape = [nchan, npol, oversampling, oversampling, kernelwidth, kernelwidth]
newdata = numpy.zeros(newdata_shape, dtype=im.data.dtype)
assert oversampling * kernelwidth < ny
assert oversampling * kernelwidth < nx
ystart = ny // 2 - oversampling * kernelwidth // 2
xstart = nx // 2 - oversampling * kernelwidth // 2
yend = ny // 2 + oversampling * kernelwidth // 2
xend = nx // 2 + oversampling * kernelwidth // 2
for chan in range(nchan):
for pol in range(npol):
for y in range(oversampling):
slicey = slice(yend + y, ystart + y, -oversampling)
for x in range(oversampling):
slicex = slice(xend + x, xstart + x, -oversampling)
newdata[chan, pol, y, x, ...] = im.data[chan, pol, slicey, slicex]
return create_image_from_array(newdata, newwcs, polarisation_frame=im.polarisation_frame) | fe1a2a81421a5f3c09e6c6439aeb7b52e217967f | 2,601 |
import numpy as np
import tensorflow as tf
def prob(X, w):
"""
X: Nxd
w: dx1
---
prob: N x num_classes(2)"""
y = tf.constant(np.array([0.0, 1.0]), dtype=tf.float32)
prob = tf.exp(tf.matmul(X, w) * y) / (1 + tf.exp(tf.matmul(X, w)))
return prob | b916f75bc3596bbbff701b6dbb3b43add0f06373 | 2,602 |
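A quick usage sketch for prob above (assumes TensorFlow 2.x eager execution; the shapes are made up):

import numpy as np
import tensorflow as tf

X = tf.constant(np.random.randn(4, 3), dtype=tf.float32)  # 4 samples, 3 features
w = tf.constant(np.random.randn(3, 1), dtype=tf.float32)  # weight column vector
p = prob(X, w)
print(p.shape)                   # (4, 2): each row is [P(y=0), P(y=1)]
print(tf.reduce_sum(p, axis=1))  # every row sums to 1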
import numpy as np
from scipy.spatial.distance import canberra  # assumed source of canberra
def get_similar_genes_Quantiles(
gene_expr: np.array,
n_genes: int,
candidate_quants: np.ndarray,
candidate_genes: np.array,
quantiles=(0.5, 0.75, 0.85, 0.9, 0.95, 0.97, 0.98, 0.99, 1),
):
"""Gets genes with a similar expression distribution as the inputted gene,
by measuring distance between the gene expression quantiles.
Parameters
----------
    gene_expr: np.array Expression of the gene of interest, or, if the same length as quantiles, assumed to be the pre-calculated quantiles.
n_genes: int Number of equivalent genes to select.
candidate_quants: np.ndarray Expression quantiles of gene candidates (quantiles*genes).
    candidate_genes: np.array Gene names, one per column of candidate_quants.
    quantiles: tuple The quantiles at which the expression distributions are compared.
Returns
-------
similar_genes: np.array Array of strings for gene names.
"""
    if isinstance(quantiles, float):
quantiles = np.array([quantiles])
else:
quantiles = np.array(quantiles)
# Getting the quantiles for the gene #
if len(gene_expr) != len(quantiles):
# ref_quants = np.quantile(gene_expr, q=quantiles, interpolation='nearest')
ref_quants = nonzero_quantile(gene_expr, q=quantiles, interpolation="nearest")
else:
ref_quants = gene_expr
# Measuring distances from the desired gene #
dists = np.apply_along_axis(canberra, 0, candidate_quants, ref_quants)
order = np.argsort(dists)
""" During debugging, plotting distribution of distances & selected genes.
import matplotlib.pyplot as plt
cutoff = dists[order[n_genes]]
fig, ax = plt.subplots()
ax.hist(dists[order[0:28000]], bins=1000)
y_max = ax.get_ylim()[1]
ax.vlines(cutoff, 0, y_max/2, color='r')
plt.show()
print(candidate_quants[:,order[0:3]]) # Showing the quantiles of selected
print(candidate_quants[:,order[n_genes-3:n_genes]])
print(ref_quants)
"""
# Retrieving desired number of genes #
similar_genes = candidate_genes[order[0:n_genes]]
return similar_genes | c327e78a08f8da73af896a2e496f6c24258cc271 | 2,603 |
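A self-contained sketch of the matching idea above, using scipy's canberra distance on plain quantiles (nonzero_quantile belongs to the surrounding package, so ordinary np.quantile stands in here; the data is synthetic):

import numpy as np
from scipy.spatial.distance import canberra

quantiles = np.array([0.5, 0.75, 0.9, 1.0])
rng = np.random.default_rng(0)
candidate_expr = rng.gamma(2.0, 1.0, size=(500, 200))              # cells x genes
candidate_quants = np.quantile(candidate_expr, quantiles, axis=0)  # quantiles x genes
ref_quants = np.quantile(candidate_expr[:, 0], quantiles)          # pretend gene 0 is the query

dists = np.apply_along_axis(canberra, 0, candidate_quants, ref_quants)
similar = np.argsort(dists)[:10]  # the 10 columns whose quantile profile is closest (gene 0 itself comes first)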
from datetime import date, timedelta
def get_date_strings():
"""
Get date strings for last month and this month in "%Y%m" format, e.g. "202201"
"""
today = date.today()
first = today.replace(day=1)
last_month = first - timedelta(days=1)
this_month_string = today.strftime("%Y%m")
last_month_string = last_month.strftime("%Y%m")
return this_month_string, last_month_string | cc09f710d86efcc73a7e653d30cc2d590ba865e6 | 2,604 |
import logging
def get_gpa(cookie, sno, year='', term=''):
"""
获取已取得的总基点: 专必 公必 公选 专选
"""
logging.debug('Getting gpa: %s %s %s %s', sno, year, term, cookie)
url = 'http://uems.sysu.edu.cn/jwxt/xscjcxAction/xscjcxAction.action?method=getAllJd'
query_json = """
{
header: {
"code": -100,
"message": {
"title": "",
"detail": ""
}
},
body: {
dataStores: {
jdStore: {
rowSet: {
"primary": [],
"filter": [],
"delete": []
},
name: "jdStore",
pageNumber: 1,
pageSize: 2147483647,
recordCount: 0,
rowSetName: "pojo_com.neusoft.education.sysu.djks.ksgl.model.TwoColumnModel"
}
},
parameters: {
"args": [
"%s",
"%s",
"%s",
""
]
}
}
}
""" %(sno, year, term)
return retrive_data(url, cookie, query_json) | d586aab689e40be763ccb907a4673e2da500e8a2 | 2,605 |
from copy import deepcopy
from typing import Tuple

import numpy as np
import torch
from torchvision.transforms import functional as F  # assumed source of F.rotate
def rotate(
img: torch.Tensor,
boxes: np.ndarray,
angle: float,
) -> Tuple[torch.Tensor, np.ndarray]:
"""Rotate image around the center, interpolation=NEAREST, pad with 0 (black)
Args:
img: image to rotate
boxes: array of boxes to rotate as well
angle: angle in degrees. +: counter-clockwise, -: clockwise
Returns:
A tuple of rotated img (tensor), rotated boxes (np array)
"""
rotated_img = F.rotate(img, angle=angle, fill=0) # Interpolation NEAREST by default
_boxes = deepcopy(boxes)
if boxes.dtype == int:
# Compute relative boxes
_boxes = _boxes.astype(float)
_boxes[:, [0, 2]] = _boxes[:, [0, 2]] / img.shape[2]
_boxes[:, [1, 3]] = _boxes[:, [1, 3]] / img.shape[1]
# Compute rotated bboxes: xmin, ymin, xmax, ymax --> x, y, w, h, alpha
r_boxes = rotate_boxes(_boxes, angle=angle, min_angle=0)
if boxes.dtype == int:
# Back to absolute boxes
r_boxes[:, [0, 2]] *= img.shape[2]
r_boxes[:, [1, 3]] *= img.shape[1]
return rotated_img, r_boxes | acd5c83a857b1bdb2312a078cfd972f9a1a0df9f | 2,606 |
import numpy as np
def _letterbox_image(img, w_in, h_in):
"""To get the image in boxed format."""
imc, imh, imw = img.shape
if (w_in / imw) < (h_in / imh):
new_w = w_in
new_h = imh * w_in // imw
else:
new_h = h_in
new_w = imw * h_in // imh
resized = _resize_image(img, new_w, new_h)
boxed = np.full((imc, h_in, w_in), 0.5, dtype=float)
_, resizedh, resizedw = resized.shape
boxed[:, int((h_in - new_h) / 2)
:int((h_in - new_h) / 2) + resizedh, int((w_in - new_w) / 2)
:int((w_in - new_w) / 2) + resizedw] = resized
return boxed | 918e96f3ac7f5b1c8f7177ad759dab0579763e77 | 2,607 |
def to_RRDB(**kwargs):
"""
Residual in Residual Dense Blocks
"""
kwargs["n_filer"] = (" ",) * len(kwargs["n_filer"]) # remove x label
return _Box(fill="{rgb:white,1;black,3}", **kwargs) | 2b1afd5f4a8c65364fcdee18fc8da3da71eade08 | 2,608 |
def continuous_agg_dict_features(n, n_feats, ks):
"""Listdict-like continuous aggregated features.
Parameters
----------
n: int
the number of the elements to create their features.
n_feats: int
the number of features.
ks: int
the number of perturbations.
Returns
-------
features: list
the random features we want to compute.
"""
features = []
for k in range(ks):
features.append(continuous_dict_features(n, n_feats))
return features | ec98930c124553a86ef50db58cf7e13107bf6e52 | 2,609 |
import numpy as np
def counts_matrix(x, quantiles):
"""Count samples in strata
Get eta, the number of samples in ``x`` binned by ``quantiles`` in each
variable, for continuous variables. The shape of eta is the same as the
shape of ``x``, and the shape of ``quantiles`` should be
(``numpy.shape(x)[0] + 1``, ``numpy.shape(x)[1]``) for 2D, or
(``numpy.size(x) + 1``,) for 1D
Parameters
----------
x : :class:`numpy.ndarray` (Nx,) or (Nx, Npredictors)
The sampled predictors, with observations as rows and predictors (if
more than 1) as columns
quantiles : :class:`numpy.ndarray` (Nx + 1,) or (Nx + 1, Npredictors)
The quantiles which mark the edge of strata. The 0th axis must be
one element longer than the 0th axis of ``x``
Returns
-------
eta : :class:`numpy.ndarray`[``int``] (Nx,) or (Nx, Npredictors)
The matrix of counts in strata, with the same shape as ``x``
"""
if np.ndim(quantiles) == 1:
eta = np.histogram(np.squeeze(x), bins=quantiles)[0].astype(int)
else:
eta = np.array([
np.histogram(xj, bins=qj)[0].astype(int) for xj, qj in zip(
np.asarray(x).T, np.asarray(quantiles).T)]).T
return eta | 935cd19913e420ea6713ca74ead19f720bdef782 | 2,610 |
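A small usage sketch of the 1-D case described above, with synthetic data:

import numpy as np

rng = np.random.default_rng(1)
x = rng.normal(size=20)
# Nx + 1 = 21 quantile edges define 20 strata, so eta has the same shape as x
quantiles = np.quantile(x, np.linspace(0.0, 1.0, x.size + 1))
eta = counts_matrix(x, quantiles)
print(eta.shape, eta.sum())  # (20,) and 20: every sample lands in some stratum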
import logging
def get_xml_string(stream_pointer):
""" This function checks for valid xml in a stream
and skips bytes until it hits something that looks like
xml. In general, this 'skipping' should never be used, as
we expect to see well-formed XML from the server.
stream_pointer: input stream
returns: string of xml
"""
    # This function avoids stream_pointer.seek() for the vast majority
    # of cases (when the xml is formatted correctly) just because I don't
    # like using 'seek' (you never know when you're getting a non-rewindable
    # stream).
c = stream_pointer.read(1)
count = 0
while c != '<' and c != '':
count = count + 1
c = stream_pointer.read(1)
if c == '':
stream_pointer.seek(0)
logging.error("Poorly formatted schema - no '<' found", \
extra={'xml':stream_pointer.read()})
return
xml_string = "<" + stream_pointer.read()
if count > 0:
stream_pointer.seek(0)
logging.error("Poorly formatted schema", \
extra={'xml':stream_pointer.read()})
return xml_string | 3fa2e3d05bfc66cee592c4c40cc1e9349e512c3a | 2,611 |
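A quick illustration of the skipping behaviour with in-memory text streams (note the function compares single characters, so it expects a text-mode stream):

from io import StringIO

clean = StringIO("<schema><item/></schema>")
print(get_xml_string(clean))   # "<schema><item/></schema>"

noisy = StringIO("junk bytes<schema/>")
print(get_xml_string(noisy))   # "<schema/>", after logging a "Poorly formatted schema" error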
import re
def parse_header(header):
"""Parse header div for pub. title, authors journal, year, and doi."""
# TITLE
title = header.find('h1').text.strip()
# JOURNAL
journal = header.find('button').text.strip()
# PUBLICATION YEAR
pub_date = header.find('span', attrs={'class': "cit"}).text
year = re.search(r"(\d{4}).*?[\.;]", pub_date).group(1)
# DOI
doi_cit = header.find(attrs={'class': "citation-doi"})
doi = doi_cit.text.strip().lstrip("doi: ").rstrip(".") if doi_cit else ""
# AUTHORS
authors = [parse_author(a) for a in header.find_all(
'span', attrs={'class': "authors-list-item"})]
authors = [a for a in authors if a]
return (title, journal, year, doi, authors) | 70dc1defbd9e6098e0754164d0dd23c7c79074d6 | 2,612 |
import argparse
def parse_arguments():
"""Parse user args
There are three subparsers, one for each mode: full, visit, and moab.
Full mode runs both the visit and moab steps. Each parser should have a
full help message, simplified usage statement, and examples.
"""
mode_examples = """
To view full options for each mode, use 'generate_isogeom MODE -h'.
Example usage:
(1) Run all the steps start to finish (full mode) starting with meshfile
'cw_mesh', scalar data 'wwn', and defining 3 values for the level
information at runtime:
generate_isogeom full cw_mesh wwn -lv 0.1 5.2 12.3
(2) Run just the first step (visit mode), generating logarithmically spaced
levels between 0.1 and 1e+14 and specifying where to write the
generated database:
generate_isogeom visit cw_mesh wwn -gl log -lx 0.1 1e14 -db my_database
    (3) Run only the second step (moab mode), using the levelfile and database
        from the visit step, and specifying a file name for the file produced:
generate_isogeom moab -lf my_database/levelfile -db my_database
-g geom1.h5m
"""
mode_description = """
Use this to generate a full isosurface geometry from a starting Cartesian mesh
file containing scalar data using VisIt and MOAB. This tool can be run in three
different modes:
full: run both steps starting from the Cartesian mesh file to produce
a full DAGMC-compliant isosurface geom. This step first runs the visit
step then the moab step.
visit: run only the first step using VisIt. This will generate a database
        of individual mesh isosurfaces from the Cartesian mesh file itself.
moab: run only the second step using MOAB. This will generate a full DAGMC-
compliant isosurface geometry starting from the database generated from
the visit step.
"""
parser = argparse.ArgumentParser(description=mode_description,
usage='generate_isogeom MODE [OPTIONS]',
epilog=mode_examples,
formatter_class=formatter)
subparsers = parser.add_subparsers(title='Modes',
help='Select which steps to run for ' +
'generating the geometry.')
# set full mode options
full_description = """
Start-to-finish generation from a Cartesian mesh file to a DAGMC-compliant
geometry.
Levels information must be provided with either the -lf, -lv, or -gl option.
If using the -gl option (generate levels), then options -lx and -N must also be
provided.
"""
full_usage = \
'generate_isogeom full meshfile dataname [-lf/-lv/-gl] [OPTIONS]'
full_examples = """
Example Usage:
(1) Create an isosurface geometry called 'my_isogeom.h5m' with assigned
level values of 0.1 0.4 and 1.0, and tag the surfaces with data for
        visualization:
generate_isogeom full meshfile my_data -lv 0.1 0.4 1.0
-g my_isogeom.h5m --viz
    (2) Generate a geometry with 5 levels logarithmically spaced between 1e-5 and
1e+3. Also tag the geometry two metadata tags called E1 and E2 with
values of 1.0 and 10.0, respectively:
generate_isogeom full meshfile my_data -gl log -lx 1e-5 1e+3 -N 5
-t E1 1.0 -t E2 10.0
(3) Store the generated database in a different folder called 'my_isogeom/'
and read level information from a file called 'levelfile' located in
the current directory:
generate_isogeom full meshfile my_data -lf levelfile -db my_isogeom/
"""
full_parser = subparsers.add_parser('full',
description=full_description,
usage=full_usage,
epilog=full_examples,
formatter_class=formatter)
set_visit_only_options(full_parser)
set_shared_options(full_parser)
set_moab_only_options(full_parser)
full_parser.set_defaults(which='full')
# set visit only mode options
visit_description = """
Only generate the isosurface mesh file database using VisIt.
Levels information must be provided with either the -lf, -lv, or -gl option.
If using the -gl option (generate levels), then options -lx and -N must also be
provided.
"""
visit_usage = \
'generate_isogeom visit meshfile dataname [-lf/-lv/-gl] [OPTIONS]'
visit_examples = """
Example Usage:
(1) Generate a database located at 'my_database/' with assigned
level values of 0.1 0.4 and 1.0:
generate_isogeom visit meshfile my_data -lv 0.1 0.4 1.0
-db my_isogeom/
    (2) Generate a database in the default location using levels between 1.0
        and 2e+4 that are spaced with a ratio of 20:
generate_isogeom visit meshfile my_data -gl ratio -lx 1.0 2.e4 -N 20
    (3) Generate a database in the default location using 15 levels between 1.0
        and 2e+4 that are spaced logarithmically:
generate_isogeom visit meshfile my_data -gl log -lx 1.0 2.e4 -N 15
(4) Generate a database in a folder called 'my_isogeom/' and read the level
information from a file in the current directory called 'levelfile':
generate_isogeom visit meshfile my_data -lf levelfile -db my_isogeom/
"""
visit_parser = subparsers.add_parser('visit',
description=visit_description,
usage=visit_usage,
epilog=visit_examples,
formatter_class=formatter)
set_visit_only_options(visit_parser)
set_shared_options(visit_parser)
visit_parser.set_defaults(which='visit')
# set moab only mode options
moab_description = """
Only generate the DAGMC-compliant geometry with MOAB starting from the VisIt
mesh file database.
Levels information must be provided with either the -lf or -lv option.
"""
moab_usage = 'generate_isogeom moab dataname [-lf/-lv] [OPTIONS]'
moab_examples = """
Example Usage:
(1) Create an isosurface geometry called 'my_isogeom.h5m' with assigned
level values of 0.1 0.4 and 1.0, and tag the surfaces with data for
        visualization (assume default database location):
generate_isogeom moab -lv 0.1 0.4 1.0 -g my_isogeom.h5m --viz
(2) Generate a geometry from a database located in 'my_isogeom/', read the
        level info from a file called 'levelinfo', multiply all data by a
factor of 2e4, and save the file as 'my_isogeom.vtk' in a new folder
called 'output_folder/':
generate_isogeom moab -db my_isogeom/ -lf levelinfo -n 2e4
-g my_isogeom.vtk -sp output_folder/
(3) Generate a geometry from a database in the default location, read
levels from a file called 'levelfile' located in the database, tag the
geometry two metadata tags called E1 and E2 with values of 1.0 and
10.0, respectively, and tag the geometry with the level information for
        visualization:
generate_isogeom moab -lf tmp/levelfile -t E1 1.0 -t E2 10.0 -v
"""
moab_parser = subparsers.add_parser('moab',
description=moab_description,
usage=moab_usage,
epilog=moab_examples,
formatter_class=formatter)
set_shared_options(moab_parser, moab=True)
set_moab_only_options(moab_parser)
moab_parser.set_defaults(which='moab')
args = parser.parse_args()
return args | 9cd3809479ab49a53343c6a7007e34fbf08dc23b | 2,613 |
import pymysql
def put_this_into_the_db(query, param):
    """put this value into the database
    see : find_by_exactly_this_query()
    Arguments:
        query {str} -- parameterized SQL statement to execute
        param {tuple} -- parameters substituted into the query placeholders
    Returns:
        bool -- True if the statement was committed, False if an error occurred
    """
# Connect to the database
connection = pymysql.connect(host='localhost',
user='root',
password='(drElizabeth)',
db='communications',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
try:
with connection.cursor() as cursor:
# Create a new record
sql = query
cursor.execute(sql, param)
# connection is not autocommit by default. So you must commit to save
# your changes.
connection.commit()
except Exception as e:
print(e)
connection.close()
return False
connection.close()
return True | 08cebe330cea5f10189342c6f3ec4f9f7cc022e1 | 2,614 |
def _gen_new_aux_page(label: str, is_title: bool) -> str:
"""Generate latex for auxillary pages"""
page = []
if is_title:
page.append("\\thispagestyle{empty}")
page.append("\\begin{center}")
page.append("\t\\vfil")
page.append("\t\\vspace*{0.4\\textheight}\n")
page.append("\t\\Huge")
page.append(f"\t\\bf{{{label}}}\n")
page.append("\t\\normalsize")
page.append("\\end{center}")
return "\n".join(page) | 3ff31ae80f007fd5da2dd6153ea605978421c086 | 2,615 |
import numpy as np
def expand_matrix_col(matrix, max_size, actual_size):
"""
add columns of zeros to the right of the matrix
"""
return np.append(matrix,
np.zeros((matrix.shape[0], max_size - actual_size), dtype=matrix.dtype), axis=1) | 23b20b443c880d1658eeec89910f9f3384576e6e | 2,616 |
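For example, padding a 2x3 matrix out to 5 columns:

import numpy as np

m = np.ones((2, 3), dtype=int)
padded = expand_matrix_col(m, max_size=5, actual_size=3)
print(padded)
# [[1 1 1 0 0]
#  [1 1 1 0 0]]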
import logging
def vms_list(access_token, config_id):
"""List FlexVM Virtual Machines"""
logging.info("--> List FlexVM Virtual Machines...")
uri = FLEXVM_API_BASE_URI + "vms/list"
headers = COMMON_HEADERS.copy()
headers["Authorization"] = f"Bearer {access_token}"
body = {"configId": config_id}
results = requests_post(uri, body, headers)
return results | eed35eefae4e26d743e0e96e791b6f5dd84d0c2f | 2,617 |
import numpy as np
from itertools import product
def formulate_hvdc_flow(problem: LpProblem, angles, Pinj, rates, active, Pt, control_mode, dispatchable, r, F, T,
                        logger: Logger = Logger(), inf=999999):
    """
    Formulate the HVDC link flows and add their constraints to the LP problem.
    :param problem: LpProblem to which the constraints are added
    :param angles: bus voltage angle variables (nbus, nt)
    :param Pinj: bus power injection expressions (nbus, nt), modified in place
    :param rates: HVDC rating array (nhvdc, nt)
    :param active: HVDC active-state array (nhvdc, nt)
    :param Pt: HVDC power set points (nhvdc, nt)
    :param control_mode: HVDC control mode per device
    :param dispatchable: dispatchable flag per device
    :param r: resistance per device
    :param F: "from" bus index per device
    :param T: "to" bus index per device
    :param logger: Logger instance for warnings and errors
    :param inf: value used as an infinite bound
    :return: flow_f, overload1, overload2, hvdc_control1, hvdc_control2
    """
nhvdc, nt = rates.shape
flow_f = np.zeros((nhvdc, nt), dtype=object)
overload1 = np.zeros((nhvdc, nt), dtype=object)
overload2 = np.zeros((nhvdc, nt), dtype=object)
hvdc_control1 = np.zeros((nhvdc, nt), dtype=object)
hvdc_control2 = np.zeros((nhvdc, nt), dtype=object)
for t, i in product(range(nt), range(nhvdc)):
if active[i, t]:
_f = F[i]
_t = T[i]
hvdc_control1[i, t] = LpVariable('hvdc_control1_{0}_{1}'.format(i, t), 0, inf)
hvdc_control2[i, t] = LpVariable('hvdc_control2_{0}_{1}'.format(i, t), 0, inf)
P0 = Pt[i, t]
if control_mode[i] == HvdcControlType.type_0_free:
if rates[i, t] <= 0:
logger.add_error('Rate = 0', 'HVDC:{0} t:{1}'.format(i, t), rates[i, t])
# formulate the hvdc flow as an AC line equivalent
bk = 1.0 / r[i] # TODO: yes, I know... DC...
flow_f[i, t] = P0 + bk * (angles[_f, t] - angles[_t, t]) + hvdc_control1[i, t] - hvdc_control2[i, t]
# add the injections matching the flow
Pinj[_f, t] -= flow_f[i, t]
Pinj[_t, t] += flow_f[i, t]
# rating restriction in the sense from-to: eq.17
overload1[i, t] = LpVariable('overload_hvdc1_{0}_{1}'.format(i, t), 0, inf)
problem.add(flow_f[i, t] <= (rates[i, t] + overload1[i, t]), "hvdc_ft_rating_{0}_{1}".format(i, t))
# rating restriction in the sense to-from: eq.18
overload2[i, t] = LpVariable('overload_hvdc2_{0}_{1}'.format(i, t), 0, inf)
problem.add((-rates[i, t] - overload2[i, t]) <= flow_f[i, t], "hvdc_tf_rating_{0}_{1}".format(i, t))
elif control_mode[i] == HvdcControlType.type_1_Pset and not dispatchable[i]:
# simple injections model: The power is set by the user
flow_f[i, t] = P0 + hvdc_control1[i, t] - hvdc_control2[i, t]
Pinj[_f, t] -= flow_f[i, t]
Pinj[_t, t] += flow_f[i, t]
elif control_mode[i] == HvdcControlType.type_1_Pset and dispatchable[i]:
# simple injections model, the power is a variable and it is optimized
P0 = LpVariable('hvdc_pf_{0}_{1}'.format(i, t), -rates[i, t], rates[i, t])
flow_f[i, t] = P0 + hvdc_control1[i, t] - hvdc_control2[i, t]
Pinj[_f, t] -= flow_f[i, t]
Pinj[_t, t] += flow_f[i, t]
return flow_f, overload1, overload2, hvdc_control1, hvdc_control2 | b9fb1d6d7fdf19ee97f1b29f4e4b279130aab530 | 2,618 |
from unittest.mock import patch
def method_mock(cls, method_name, request):
"""
Return a mock for method *method_name* on *cls* where the patch is
reversed after pytest uses it.
"""
_patch = patch.object(cls, method_name)
request.addfinalizer(_patch.stop)
return _patch.start() | b14d991c42e0c05a51d9c193c3769b1e1e71dd1f | 2,619 |
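A usage sketch of the fixture pattern this helper is meant for (assumes pytest; SomeClass and do_work are hypothetical names):

import pytest

class SomeClass:
    def do_work(self):
        return "real"

@pytest.fixture
def do_work_(request):
    # patched for the duration of one test, reversed by the finalizer
    return method_mock(SomeClass, "do_work", request)

def test_uses_mock(do_work_):
    do_work_.return_value = "fake"
    assert SomeClass().do_work() == "fake"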
import numpy as np
def get_eps_float32():
"""Return the epsilon value for a 32 bit float.
Returns
-------
_ : np.float32
Epsilon value.
"""
return np.finfo(np.float32).eps | e0506637aa3f9c29dc33d1256ce21d7dc686a4cd | 2,620 |
import tensorflow as tf
import tensorflow_probability as tfp
def distributions_to_params(nest):
"""Convert distributions to its parameters, keep Tensors unchanged.
Only returns parameters that have tf.Tensor values.
Args:
nest (nested Distribution and Tensor): Each Distribution will be
converted to dictionary of its Tensor parameters.
Returns:
A nest of Tensor/Distribution parameters. Each leaf is a Tensor or a
dict corresponding to one distribution, with keys as parameter name and
values as tensors containing parameter values.
"""
def _to_params(dist_or_tensor):
if isinstance(dist_or_tensor, tfp.distributions.Distribution):
params = dist_or_tensor.parameters
return {
k: params[k]
for k in params if isinstance(params[k], tf.Tensor)
}
elif isinstance(dist_or_tensor, tf.Tensor):
return dist_or_tensor
else:
raise ValueError(
"Only Tensor or Distribution is allowed in nest, ",
"got %s. nest is %s" % (dist_or_tensor, nest))
return tf.nest.map_structure(_to_params, nest) | bfa1cfd043bd46667de8ed07fd54fef959b272ae | 2,621 |
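A usage sketch (assumes TensorFlow Probability is installed; the nest below is made up):

import tensorflow as tf
import tensorflow_probability as tfp

nest = {
    "dist": tfp.distributions.Normal(loc=tf.zeros(3), scale=tf.ones(3)),
    "value": tf.constant([1.0, 2.0]),
}
params = distributions_to_params(nest)
# params["dist"] is {"loc": <Tensor>, "scale": <Tensor>} (non-Tensor parameters are dropped),
# while params["value"] is returned unchanged.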
def _return_xarray_system_ids(xarrs: dict):
"""
Return the system ids for the given xarray object
Parameters
----------
xarrs
Dataset or DataArray that we want the sectors from
Returns
-------
list
system identifiers as string within a list
"""
return list(xarrs.keys()) | 8380d1c2ae9db48eb4b97138dcd910d58085073e | 2,622 |
def sub(a, b):
"""Subtracts b from a and stores the result in a."""
return "{b} {a} ?+1\n".format(a=a, b=b) | dcc0ddfc9dbefe05d79dea441b362f0ddfe82627 | 2,623 |
import numpy as np
import pandas as pd
def metrics_cluster(models = None, ytrain = None, ytest = None,
                    testlabels = None,
                    trainlabels = None,
                    Xtrain = None, Xtest = None):
"""
Calculates Metrics such as accuracy, balanced accuracy,
specificity, sensitivity, precision, True Positives,
True Negatives etc.
These metrics are calculated for each cluster:
models: predictive models trained in each cluster
ytrain: Target labels of training set
ytest: target labels of test set
testlabels: a matrix with numbers from 0 to c-1 number of clusters
indicating in which cluster each data point belongs
in the test set
trainlabels: the same as testlabels but for training data
    Xtrain: training data
Xtest: testing data
"""
# matrix with metrics for each cluster
metricsTrain = []
#metrics for test data in each cluster
metricsTest = []
columns = ['cluster', 'size', 'high_cost%','low_cost%',
'TP', 'TN', 'FP', 'FN',
'FPR', 'specificity', 'sensitivity', 'precision',
'accuracy', 'balanced accuracy', 'f1', 'auc']
#Calculate the Metrics for Each Cluster
for cluster in np.arange( len( models ) ):
#INDEXES OF CLUSTER "cluster"
inC = np.where( trainlabels == cluster )[0]
inCT = np.where( testlabels == cluster )[0]
#predict probabilities of data in cluster "cluster"
#to be 1
probTrain = models[cluster].predict_proba(Xtrain[inC])[:, 1]
probTest = models[cluster].predict_proba(Xtest[inCT])[:, 1]
#calculate optimal tau based on F1
        try:
            tau = optimalTau(probTrain, ytrain[inC])
        except Exception:
            tau = 0.5
            print(" Warning: tau set to 0.5 due to error(s) "
                  "in <<optimalTau>> function")
#CALCULATE METRICS : ACCURACY, RECALL, PRECISION ,
#BALANCED ACCURACY ETC
metTrain , _= calc_metrics( custom_prob = probTrain,
y = ytrain[inC],
cluster = cluster,
tau = tau )
metTest, _ = calc_metrics( custom_prob = probTest,
y = ytest[inCT],
cluster = cluster,
tau = tau)
metricsTrain.append( metTrain )
metricsTest.append( metTest )
#Create a dataframe with metrics for better Visualization
metricsTrain = pd.DataFrame ( metricsTrain, columns = columns )
metricsTest = pd.DataFrame( metricsTest, columns = columns )
return metricsTrain, metricsTest | c9c131385a47df3de511db0e85ece20131647d4e | 2,624 |
import numpy as np
def prune_cloud_borders(numpy_cloud, clearance=1.2):
"""Delete points at the clouds' borders in range of distance, restricting the x-y plane (ground)"""
# get min/max of cloud
cloud_max_x = np.max (numpy_cloud[:, 0])
cloud_min_x = np.min (numpy_cloud[:, 0])
cloud_max_y = np.max (numpy_cloud[:, 1])
cloud_min_y = np.min (numpy_cloud[:, 1])
# define 4 borders
borders = [cloud_max_x - clearance, cloud_min_x + clearance,
cloud_max_y - clearance, cloud_min_y + clearance]
# index all points within borders
numpy_cloud = numpy_cloud[numpy_cloud[:, 0] < borders[0]]
numpy_cloud = numpy_cloud[numpy_cloud[:, 0] > borders[1]]
numpy_cloud = numpy_cloud[numpy_cloud[:, 1] < borders[2]]
numpy_cloud = numpy_cloud[numpy_cloud[:, 1] > borders[3]]
return numpy_cloud | f208c9778343c3240803b52ff3e5f4701a8bb1cb | 2,625 |
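A quick sanity check with a synthetic cloud (x, y, z columns):

import numpy as np

rng = np.random.default_rng(0)
cloud = rng.uniform(0.0, 10.0, size=(1000, 3))  # points in a 10 x 10 x 10 box
pruned = prune_cloud_borders(cloud, clearance=1.2)
print(cloud.shape, pruned.shape)  # (1000, 3) -> roughly (580, 3): the 1.2-wide x-y rim is dropped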
def factory(name, Base, Deriveds):
"""Find the base or derived class by registered name.
Parameters
----------
Base: class
Start the lookup here.
Deriveds: iterable of (name, class)
A list of derived classes with their names.
Returns
-------
class
"""
Derived = Base
for (nm, NmCl) in Deriveds:
if nm == name:
Derived = NmCl
break
return Derived | 1bce29651004cf1f04740fd95a4f62c6c2277a72 | 2,626 |
import numpy as np
def root_sum_square(values, ax_val, index, Nper, is_aper, is_phys, unit):
"""Returns the root sum square (arithmetic or integral) of values along given axis
Parameters
----------
    values: ndarray
        array on which to compute the root sum square
    ax_val: ndarray
        axis values
    index: int
        index of the axis along which to compute the root sum square
Nper: int
number of periods to replicate
is_aper: bool
True if values is anti-periodic along axis
is_phys: bool
        True if physical quantity (time/angle/z)
    unit: str
        unit of values (e.g. "dB"/"dBA", which triggers a plain sum instead of integration)
Returns
-------
values: ndarray
root sum square of values
"""
# To sum dB or dBA
if "dB" in unit:
return my_sum(values, index, Nper, is_aper, unit)
else:
if is_aper and Nper is not None:
# Remove anti-periodicity since values is squared
is_aper = False
if ax_val.size == 1: # Do not use integrate for single point axes
is_phys = False
if is_phys:
values = integrate(values ** 2, ax_val, index, Nper, is_aper, is_phys)
else:
values = my_sum(values ** 2, index, Nper, is_aper, unit)
return np.sqrt(values) | 2af20718dc4d7a6b8d40e939a46d140fda5bf375 | 2,627 |
import json
from bson import ObjectId   # assumed: ObjectId for MongoDB ids
from flask import request   # assumed: Flask request object
def comment_on_tweet():
    """
http://127.0.0.1:5000/user/comment_on_tweet
body = {
"id": "5da61dbed78b3b2b10a53582",
"comments" : {
"commenter" : "[email protected]",
"comment" : "comments against tweet : 7"
}
}
"""
data = request.get_json()
tweet_id = data['id']
    # find_one returns None when nothing matches; find() returns a cursor and never None
    record = tweetDB.find_one({'_id': ObjectId(tweet_id)})
    if record is None:
return json.dumps({'error': "No collaborations to update matched id"})
else:
try:
if 'comments' in data and isinstance(data['comments'], object):
result = tweetDB.update(
{"_id": ObjectId(tweet_id)},
{
'$push': {
"comments": data['comments']
}
}
)
return json.dumps({"success": True})
except Exception as e:
return json.dumps({"error": "Exception found"}) | 232854a883a4bbd99a46dc3dc46e9a47fb1993dc | 2,628 |
from werkzeug.datastructures import Headers  # assumed source of the Headers class
def generate_git_api_header(event, sig):
"""
Create header for GitHub API Request, based on header information from https://developer.github.com/webhooks/.
:param event: Name of the event type that triggered the delivery.
:param sig: The HMAC hex digest of the response body. The HMAC hex digest is generated
using the sha1 hash function and the secret as the HMAC key.
"""
return Headers([
('X-GitHub-Event', event),
('X-GitHub-Delivery', "72d3162e-cc78-11e3-81ab-4c9367dc0958"),
('X-Hub-Signature', f"sha1={sig}"),
('User-Agent', "GitHub-Hookshot/044aadd"),
('Content-Type', "application/json"),
('Content-Length', 6615)
]) | 9b60d9eb6a8ea962bb7426970f2c2b82a229ef12 | 2,629 |
from scipy.stats import norm
def var_gaussian(r, level=5, modified=False):
    """
    Returns the Parametric Gaussian VaR of a Series or DataFrame
If "modified" is True, then the modified VaR is returned,
using the Cornish-Fisher modification
"""
# compute the Z score assuming it was Gaussian
z = norm.ppf(level/100)
if modified:
# modify the Z score based on observed skewness and kurtosis
s = skewness(r)
k = kurtosis(r)
z = (z +
(z**2 - 1)*s/6 +
(z**3 -3*z)*(k-3)/24 -
(2*z**3 - 5*z)*(s**2)/36
)
return -(r.mean() + z*r.std(ddof=0)) | 2ff13a6b222663a200b77e526475331bfacd9c07 | 2,630 |
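A usage sketch (assumption: r is a pandas Series of periodic returns; the skewness/kurtosis helpers referenced above come from the surrounding module, so scipy-based stand-ins are used here):

import numpy as np
import pandas as pd
import scipy.stats

def skewness(r):
    return scipy.stats.skew(r)

def kurtosis(r):
    return scipy.stats.kurtosis(r, fisher=False)  # non-excess kurtosis, since the formula uses (k - 3)

r = pd.Series(np.random.normal(0.01, 0.05, size=1000))
print(var_gaussian(r, level=5))                 # Gaussian VaR
print(var_gaussian(r, level=5, modified=True))  # Cornish-Fisher (modified) VaR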
import math
def lnglat_to_tile(lon, lat, zoom):
"""Get the tile which contains longitude and latitude.
:param lon: longitude
:param lat: latitude
:param zoom: zoom level
:return: tile tuple
"""
lon, lat = truncate(lon, lat)
n = 1 << zoom
tx = int((lon + 180.0) / 360.0 * n)
ty = int((1.0 - math.asinh(math.tan(math.radians(lat))) / math.pi) / 2.0 * n)
return Tile(tx, ty, zoom) | 84e1c103b03a2ec80a9585c8c852045c5d58cb76 | 2,631 |
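A usage sketch; Tile and truncate live in the surrounding module, so minimal stand-ins are defined here (an assumption, not the original definitions):

from collections import namedtuple

Tile = namedtuple("Tile", ["x", "y", "z"])

def truncate(lon, lat):
    # clamp to the valid Web Mercator range
    return max(-180.0, min(180.0, lon)), max(-85.051129, min(85.051129, lat))

print(lnglat_to_tile(13.4, 52.5, 10))  # Berlin at zoom 10 -> roughly Tile(x=550, y=335, z=10)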
from typing import Union
from typing import Optional
from typing import Callable
from typing import Any
import numpy as np
from anndata import AnnData
def group_obs_annotation(
adata: AnnData,
gdata: AnnData,
*,
groups: Union[str, ut.Vector],
name: str,
formatter: Optional[Callable[[Any], Any]] = None,
method: str = "majority",
min_value_fraction: float = 0.5,
conflict: Optional[Any] = None,
inplace: bool = True,
) -> Optional[ut.PandasSeries]:
"""
Transfer per-observation data from the per-observation (cell) ``adata`` to the
per-group-of-observations (metacells) ``gdata``.
**Input**
Annotated ``adata``, where the observations are cells and the variables are genes, and the
``gdata`` containing the per-metacells summed data.
**Returns**
Observations (Cell) Annotations
``<name>``
The per-group-observation annotation computed based on the per-observation annotation.
If ``inplace`` (default: {inplace}), this is written to the ``gdata``, and the function returns
``None``. Otherwise this is returned as a pandas series (indexed by the group observation
names).
**Computation Parameters**
1. Iterate on all the observations (groups, metacells) in ``gdata``.
2. Consider all the cells whose ``groups`` annotation maps them into this group.
3. Consider all the ``name`` annotation values of these cells.
4. Compute an annotation value for the whole group of cells using the ``method``. Supported
methods are:
``unique``
All the values of all the cells in the group are expected to be the same, use this
unique value for the whole groups.
``majority``
Use the most common value across all cells in the group as the value for the whole
group. If this value doesn't have at least ``min_value_fraction`` (default:
{min_value_fraction}) of the cells, use the ``conflict`` (default: {conflict}) value
instead.
"""
group_of_cells = ut.get_o_numpy(adata, groups, formatter=ut.groups_description)
values_of_cells = ut.get_o_numpy(adata, name, formatter=formatter)
value_of_groups = np.empty(gdata.n_obs, dtype=values_of_cells.dtype)
assert method in ("unique", "majority")
if method == "unique":
with ut.timed_step(".unique"):
value_of_groups[group_of_cells] = values_of_cells
else:
assert method == "majority"
with ut.timed_step(".majority"):
for group_index in range(gdata.n_obs):
cells_mask = group_of_cells == group_index
cells_count = np.sum(cells_mask)
assert cells_count > 0
values_of_cells_of_group = values_of_cells[cells_mask]
unique_values_of_group, unique_counts_of_group = np.unique(values_of_cells_of_group, return_counts=True)
majority_index = np.argmax(unique_counts_of_group)
majority_count = unique_counts_of_group[majority_index]
if majority_count / cells_count < min_value_fraction:
value_of_groups[group_index] = conflict
else:
majority_value = unique_values_of_group[majority_index]
value_of_groups[group_index] = majority_value
if inplace:
ut.set_o_data(gdata, name, value_of_groups)
return None
return ut.to_pandas_series(value_of_groups, index=gdata.obs_names) | fc9abd9a983d24869f46efb71d29cd2db53508da | 2,632 |
import os
import yaml
import json
def load_pipeline(path, tunables=True, defaults=True):
"""Load a d3m json or yaml pipeline."""
if not os.path.exists(path):
base_path = os.path.abspath(os.path.dirname(__file__))
path = os.path.join('templates', path)
path = os.path.join(base_path, path)
if not os.path.isfile(path):
raise ValueError('Could not find pipeline: {}'.format(path))
    LOGGER.warning('Loading pipeline from %s', path)
with open(path) as pipeline:
if path.endswith('yml'):
data = yaml.safe_load(pipeline)
else:
data = json.load(pipeline)
pipeline = Pipeline.from_json_structure(data)
if tunables:
# extract tunable hyperparameters
tunable_hyperparameters = extract_tunable_hyperparams(pipeline)
return pipeline, tunable_hyperparameters
return pipeline | 386683e91af1a5f568e329f70690e676c5c9383d | 2,633 |
def generate_languages(request):
"""
Returns the languages list.
"""
validate_api_secret_key(request.data.get('app_key'))
request_serializer = GenerateLanguagesRequest(data=request.data)
if request_serializer.is_valid():
get_object_or_404(TheUser, auth_token=request.data.get('user_token'))
list_of_languages = Language.objects.all()
return Response({'detail': 'successful',
'data': [language.language for language in list_of_languages]},
status=status.HTTP_200_OK)
else:
return invalid_data_response(request_serializer) | 67856b4bac293e272debb0ac9f2a2e0c863f4cdb | 2,634 |
def all_stocks():
"""
    # Query the list of all stocks currently listed and trading normally
:return:
"""
data = pro.stock_basic(exchange='', list_status='L', fields='ts_code,symbol,name,area,industry,list_date')
return data["symbol"].values | 582381319bd0b613758f41de2005e192c802a923 | 2,635 |
import requests
import json
def getBotHash(userID, isCompile=False):
"""Gets the checksum of a user's bot's zipped source code"""
params = {"apiKey": API_KEY, "userID": userID}
if isCompile:
params["compile"] = 1
result = requests.get(MANAGER_URL+"botHash", params=params)
print("Getting bot hash:")
print(result.text)
return json.loads(result.text).get("hash") | 700d5418212836e1ad20a3a336587436cf1e93de | 2,636 |
import numpy as np
def next_remote_buffer_uuid(number=1):
"""Return the next uuid of a remote buffer."""
global remote_buffer_counter
if number == 1:
ret = remote_buffer_counter
else:
ret = np.arange(remote_buffer_counter, remote_buffer_counter + number)
remote_buffer_counter = (remote_buffer_counter + number) % (1 << 60)
return ret | da31c68dd199ff765ec6eaab17912dd4e3ea8ee4 | 2,637 |
def ball_collide(i):
"""
This function will handle the ball collide interaction between brick and paddle
:param i: (int) The index of the ball to interact
:return: (Bool) If this ball collide with brick or paddle
"""
global score
collide = False
for j in range(2):
for k in range(2):
object_get = graphics.window.get_object_at(graphics.ball[i].x + graphics.ball[i].width * j,
graphics.ball[i].y + graphics.ball[i].height * k)
if object_get in graphics.brick:
# brick lose life when being hit by ball
index = graphics.brick.index(object_get)
graphics.brick_collide(index)
score += 1
collide = True
elif object_get is graphics.paddle:
collide = True
return collide | 33ee97dde1302578067e16b8251e5c3787901697 | 2,638 |
from pathlib import Path

import networkx as nx
import rdflib
from tqdm import tqdm
def gen_sparse_graph(destination_folder: Path,
vertices_number: int,
edge_probability: float) -> Path:
"""
Generates sparse graph
:param destination_folder: directory to save the graph
:type destination_folder: Path
:param vertices_number: number of vertices in the graph
:type vertices_number: int
:param edge_probability: probability of edge existence in the graph
:type edge_probability: float
:return: path to generated graph
:rtype: Path
"""
tmp_graph = nx.generators.fast_gnp_random_graph(vertices_number, edge_probability)
output_graph = rdflib.Graph()
edges = list()
for v, to in tmp_graph.edges():
edges.append((v, 'A', to))
edges.append((v, 'AR', to))
for subj, pred, obj in tqdm(
edges,
desc=f'G{vertices_number}-{edge_probability} generation'
):
add_rdf_edge(subj, pred, obj, output_graph)
target = destination_folder / f'G{vertices_number}-{edge_probability}.xml'
write_to_rdf(target, output_graph)
return target | 79369b7c436ca903e5cbc620b95d6425d5646a55 | 2,639 |
import numpy as np
from pyapprox.cython.barycentric_interpolation import \
    multivariate_hierarchical_barycentric_lagrange_interpolation_pyx
def multivariate_hierarchical_barycentric_lagrange_interpolation(
x,
abscissa_1d,
barycentric_weights_1d,
fn_vals,
active_dims,
active_abscissa_indices_1d):
"""
Parameters
----------
x : np.ndarray (num_vars, num_samples)
The samples at which to evaluate the interpolant
abscissa_1d : [np.ndarray]
List of interpolation nodes in each active dimension. Each array
has ndim==1
barycentric_weights_1d : [np.ndarray]
List of barycentric weights in each active dimension, corresponding to
each of the interpolation nodes. Each array has ndim==1
fn_vals : np.ndarray (num_samples, num_qoi)
The function values at each of the interpolation nodes
Each column is a flattened array that assumes the nodes
were created with the same ordering as generated by
the function cartesian_product.
if active_abscissa_1d is not None the fn_vals must be same size as
the tensor product of the active_abscissa_1d.
Warning: Python code takes fn_vals as num_samples x num_qoi
but c++ code takes num_qoi x num_samples. Todo change c++ code
also look at c++ code to compute barycentric weights. min() on line 154
seems to have no effect.
active_dims : np.ndarray (num_active_dims)
The dimensions which have more than one interpolation node. TODO
check if this can be simply extracted in this function by looking
at abscissa_1d.
active_abscissa_indices_1d : [np.ndarray]
The list (over each dimension) of indices for which we will compute
barycentric basis functions. This is useful when used with
        hierarchical interpolation where the function values will be zero
at some nodes and thus there is no need to compute associated basis
functions
Returns
-------
result : np.ndarray (num_samples,num_qoi)
The values of the interpolant at the samples x
"""
num_act_dims = active_dims.shape[0]
num_abscissa_1d, num_active_abscissa_1d, shifts, abscissa_and_weights, \
active_abscissa_indices_1d = \
barycentric_lagrange_interpolation_precompute(
num_act_dims, abscissa_1d, barycentric_weights_1d,
active_abscissa_indices_1d)
try:
multivariate_hierarchical_barycentric_lagrange_interpolation_pyx
result = \
multivariate_hierarchical_barycentric_lagrange_interpolation_pyx(
x, fn_vals, active_dims,
active_abscissa_indices_1d.astype(np.int_),
num_abscissa_1d.astype(np.int_),
num_active_abscissa_1d.astype(np.int_),
shifts.astype(np.int_), abscissa_and_weights)
if np.any(np.isnan(result)):
raise ValueError('Error values not finite')
except (ImportError, ModuleNotFoundError) as e:
msg = 'multivariate_hierarchical_barycentric_lagrange_interpolation extension failed'
trace_error_with_msg(msg, e)
result = __multivariate_hierarchical_barycentric_lagrange_interpolation(
x, abscissa_1d, fn_vals, active_dims, active_abscissa_indices_1d,
num_abscissa_1d, num_active_abscissa_1d, shifts,
abscissa_and_weights)
return result | da527f226ea2c95fcec160616b060eed08e83e87 | 2,640 |
import pandas as pd
import os
def deaths(path):
"""Monthly Deaths from Lung Diseases in the UK
A time series giving the monthly deaths from bronchitis, emphysema and
asthma in the UK, 1974-1979, both sexes (`deaths`),
P. J. Diggle (1990) *Time Series: A Biostatistical Introduction.*
Oxford, table A.3
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `deaths.csv`.
Returns:
Tuple of np.ndarray `x_train` with 72 rows and 2 columns and
dictionary `metadata` of column headers (feature names).
"""
path = os.path.expanduser(path)
filename = 'deaths.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/MASS/deaths.csv'
maybe_download_and_extract(path, url,
save_file_name='deaths.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata | 05ff7646d2c7a5a6368b9453ef7c8c80f1348b1c | 2,641 |
def read_csv(path):
"""Reads the CSV file at the indicated path and returns a list of rows.
Parameters:
path (str): The path to a CSV file.
Returns:
list[row]: A list of rows. Each row is a list of strings and numbers.
"""
with open(path, 'rb') as f:
return decode_csv(f.read()) | 7b979a9e15ae07cbdb2733ec071ea82664df5bab | 2,642 |
from PIL import Image, ImageOps
def obj_mask(im):
"""Computes the mask for an image with transparent background
Keyword arguments:
im -- the input image (must be RGBA)
"""
A = im.split()[-1]
T = ImageOps.invert(A)
return Image.merge("RGBA", (T, T, T, A)) | bfcb6c9c8877dc2507bc9bc658eeb1140fc950bc | 2,643 |
import tensorflow as tf
def rnn(rnn_type, inputs, length, hidden_size, layer_num=1,
        dropout_keep_prob=None, concat=True):
"""
Implements (Bi-)LSTM, (Bi-)GRU and (Bi-)RNN
    In this module, rnn is the main interface, so it is placed at the top.
Args:
rnn_type: the type of rnn, such as lstm
inputs: padded inputs into rnn, usually a d*p or l*p matrix
length: the valid length of the inputs,
usually the length of the sentence
hidden_size: the size of hidden units
layer_num: multiple rnn layer are stacked if layer_num > 1
dropout_keep_prob: dropout in RNN
concat: When the rnn is bidirectional, the forward outputs and backward
outputs are concatenated (such as a 2l*p matrix) if this is True,
else we add them (add two matrices).
Returns:
RNN outputs and final state (such as the state of lstm)
"""
if not rnn_type.startswith('bi'):
cell = get_cell(rnn_type, hidden_size, layer_num, dropout_keep_prob)
        # build the cell: stacked across layers, but not yet unrolled along the time axis
outputs, state = tf.nn.dynamic_rnn(cell, inputs,
sequence_length=length,
dtype=tf.float32)
        # dynamic_rnn unrolls the cell along the time axis and feeds it the inputs
        # outputs has shape hidden_size*length, state has shape hidden_size*layer_num*2
if rnn_type.endswith('lstm'):
c, h = state
state = h
            # use the hidden state h as the returned state
else: # bidirectional rnn
cell_fw = get_cell(rnn_type, hidden_size, layer_num, dropout_keep_prob)
# forward cell
cell_bw = get_cell(rnn_type, hidden_size, layer_num, dropout_keep_prob)
# backward cell
outputs, state = tf.nn.bidirectional_dynamic_rnn(
            cell_fw, cell_bw, inputs, sequence_length=length, dtype=tf.float32  # forward cell first, per the API signature
)
        # compared with a unidirectional rnn, the bidirectional rnn doubles the hidden_size dimension
        state_fw, state_bw = state
        # first split the state into the forward state and the backward state
if rnn_type.endswith('lstm'):
c_fw, h_fw = state_fw
c_bw, h_bw = state_bw
state_fw, state_bw = h_fw, h_bw
            # for lstm, the state we want is the hidden state
if concat:
outputs = tf.concat(outputs, 2)
            # concatenate the two tensors along the hidden_size dimension
state = tf.concat([state_fw, state_bw], 1)
            # the states are likewise concatenated along the hidden_size dimension
else:
outputs = outputs[0] + outputs[1]
state = state_fw + state_bw
            # simply add the tensors (or average them)
return outputs, state | 80d06ed499c4668bd398efdf9358c8d72e2e3192 | 2,644 |
def find_expired(bucket_items, now):
"""
If there are no expired items in the bucket returns
empty list
>>> bucket_items = [('k1', 1), ('k2', 2), ('k3', 3)]
>>> find_expired(bucket_items, 0)
[]
>>> bucket_items
[('k1', 1), ('k2', 2), ('k3', 3)]
Expired items are returned in the list and deleted from
the bucket
>>> find_expired(bucket_items, 2)
['k1']
>>> bucket_items
[('k2', 2), ('k3', 3)]
"""
expired_keys = []
for i in range(len(bucket_items) - 1, -1, -1):
key, expires = bucket_items[i]
if expires < now:
expired_keys.append(key)
del bucket_items[i]
return expired_keys | 476fd079616e9f5c9ed56ee8c85171fcb0ddb172 | 2,645 |
from numpy import array, zeros
def find_sprites(image=None, background_color=None):
    """ Find sprites
    @image: MUST be an Image object
    @background_color: optional, either a tuple (RGB/RGBA) or an int (grayscale)
"""
def find_sprites_corners(sprite, label_map, numpy_array):
columns = set()
rows = set()
for row_index, row in enumerate(numpy_array):
for column_index, column in enumerate(row):
current_pixel = label_map[row_index][column_index]
if current_pixel.label == sprite:
columns.add(current_pixel.column)
rows.add(current_pixel.row)
return min(columns), min(rows), max(columns), max(rows)
def collect_sprites(exist_sprites_label, label_map, numpy_array):
""" Return A dictionary with key:the label of a sprite and value:it's Sprite object
"""
sprites = {}
for sprite in exist_sprites_label:
top_left_column, top_left_row, bottom_right_column, bottom_right_row = find_sprites_corners(sprite, label_map, numpy_array)
sprites[sprite] = Sprite(sprite, top_left_column, top_left_row, bottom_right_column, bottom_right_row)
return sprites
def search_exist_sprites_label(pixels_to_sprites):
""" Return a set of exist sprite's label inside the map
"""
exist_sprites = set()
for key in pixels_to_sprites:
exist_sprites.add(pixels_to_sprites[key])
return exist_sprites
def unify_sprites(pixels_to_sprites, unified_matrix, numpy_array):
""" Unify all pixels that are in a same sprite
Return a 2D-array map of sprites
"""
for row_index, row in enumerate(numpy_array):
for column_index, column in enumerate(row):
current_pixel = pixels_matrix[row_index][column_index]
current_label = current_pixel.label
# Ignore background pixels
if current_label == 0 or current_label not in pixels_to_sprites:
continue
current_pixel.label = pixels_to_sprites[current_label]
return unified_matrix
def analyze_connected_sprites(connected_sprites):
""" Find all pixels that are connected (belong to a same sprite)
Return a dict:
key: pixel'label
value: sprite's label that key belong to
"""
pixels_to_sprites = {}
for key in list(connected_sprites.keys()):
if key not in connected_sprites or len(connected_sprites[key]) == 1:
continue
in_progress = True
old_length = len(connected_sprites[key])
while in_progress:
for value in connected_sprites[key]:
if value not in connected_sprites:
continue
connected_sprites[key] = connected_sprites[key] | connected_sprites[value]
if value in connected_sprites and value != key:
del connected_sprites[value]
if old_length == len(connected_sprites[key]):
in_progress = False
else:
old_length = len(connected_sprites[key])
for key in connected_sprites:
for value in connected_sprites[key]:
pixels_to_sprites[value] = key
return pixels_to_sprites
def is_new_sprite(current_row, current_column, pixels_matrix, background_color):
""" Return False if there is a non-background pixel adjacent to current pixel
Ignores background pixels.
"""
neighbor_coordinates = [(-1, -1), (-1, 0), (-1, 1), (0, -1)]
current_pixel = pixels_matrix[current_row][current_column]
is_new_sprite = True
# Ignore background pixels
if current_pixel.is_background_pixel:
return False
# Check 4 neighbor of current pixels
for coordinate in neighbor_coordinates:
neighbor_row = current_row + coordinate[0]
neighbor_column = current_column + coordinate[1]
if 0 <= neighbor_row < image_height and 0 <= neighbor_column < image_width:
neighbor_pixel = pixels_matrix[neighbor_row][neighbor_column]
if neighbor_pixel.label == 0:
continue
if current_pixel.label != 0 and current_pixel.label != neighbor_pixel.label:
connected_sprites.setdefault(current_pixel.label, set()).add(neighbor_pixel.label)
else:
pixels_matrix[current_row][current_column].label = neighbor_pixel.label
is_new_sprite = False
return is_new_sprite
def is_ignored_pixel(current_pixel, numpy_array):
""" Check if that pixel is considered background pixel
Return False by default
"""
if (background_color == (0,0,0,0) and current_pixel[-1] == 0) or (current_pixel == array(background_color)).all() or (image.mode == "L" and current_pixel == background_color):
return True
return False
def analyze_numpy_array(background_color):
""" Convert image to numpy array then analyze each pixel
@background_color: RGBA or RGB or grayscale formats
Return Maps of pixels under format matrix and numpy array (multi-dimensional)
"""
numpy_array = array(image)
pixels_matrix = zeros(numpy_array.shape, dtype=int).tolist()
for row_index, row in enumerate(numpy_array):
for column_index, column in enumerate(row):
current_pixel = numpy_array[row_index, column_index]
pixels_matrix[row_index][column_index] = Pixel(row_index, column_index, is_ignored_pixel(current_pixel, numpy_array))
for row_index, row in enumerate(numpy_array):
for column_index, column in enumerate(row):
if is_new_sprite(row_index, column_index, pixels_matrix, background_color):
new_label = sprites_label[-1] + 1
pixels_matrix[row_index][column_index].label = new_label
sprites_label.append(new_label)
connected_sprites.setdefault(new_label, set()).add(new_label)
return pixels_matrix, numpy_array
def is_valid_background_color():
""" Check if arg @background_color is valid
Return True by default
"""
# Not int or tuple
if type(background_color) not in (int, tuple):
return False
# Invalid grayscale format
if type(background_color) == int:
if not 255 >= background_color >= 0 or image.mode != "L":
return False
# Invalid RGB/ RGBA format
if type(background_color) == tuple:
if len(background_color) not in (3,4) or image.mode == "L":
return False
for element in background_color:
if type(element) != int or not 255 >= element >= 0:
return False
return True
if background_color:
pass
elif image.mode == "RGBA":
background_color = (0,0,0,0)
else:
background_color = find_most_common_color(image)
# Check validation of arg background_color
if not is_valid_background_color() or not image:
print("Invalid arguments! Please try again!")
return
image_width, image_height = image.size
# Store all connected sprites that can be unified latter
connected_sprites = {}
# List of pixels label exist inside the map
sprites_label = [0]
# Maps of pixels under format matrix and numpy array
pixels_matrix, numpy_array = analyze_numpy_array(background_color)
# Dict of pixels'label corresponding to sprite's label
pixels_to_sprites = analyze_connected_sprites(connected_sprites)
# Map of sprites under format 2D-matrix
label_map = unify_sprites(pixels_to_sprites, pixels_matrix, numpy_array)
# Set of sprite-label that exist inside the map
exist_sprites_label = search_exist_sprites_label(pixels_to_sprites)
# A dictionary with key:the label of a sprite and value:it's Sprite object
sprites = collect_sprites(exist_sprites_label, label_map, numpy_array)
return (sprites, label_map) | 67a544e916ebd01fbddd16f755e386d820507433 | 2,646 |
def get_java_package(path):
"""Extract the java package from path"""
segments = path.split("/")
    # Find the different root start indices based on potential java roots
    java_root_start_indices = [_find(segments, root) for root in ["java", "javatests"]]
    # Choose the root that starts earliest
    start_index = min(java_root_start_indices)
if start_index == len(segments):
fail("Cannot find java root: " + path)
return ".".join(segments[start_index + 1:]) | 253e503a146cffe6a8c00786539d8e3a2d6374f7 | 2,647 |
import os
def generate_seekr2_model_and_filetree(model_input, force_overwrite):
"""
Using the Model_input from the user, prepare the Model
object and the filetree. Then prepare all building files
for each anchor and serialize the Model to XML.
"""
model = common_prepare.model_factory(model_input)
common_prepare.prepare_model_cvs_and_anchors(model, model_input)
root_directory = os.path.expanduser(model_input.root_directory)
xml_path = os.path.join(root_directory, "model.xml")
if os.path.exists(xml_path):
# then a model file already exists at this location: update
# the anchor directories.
old_model = base.Model()
old_model.deserialize(xml_path)
common_prepare.modify_model(old_model, model, root_directory,
force_overwrite)
filetree.generate_filetree(model, root_directory)
filetree.copy_building_files(model, model_input, root_directory)
common_prepare.generate_bd_files(model, root_directory)
model.serialize(xml_path)
return model, xml_path | 1309c8106f4b52b14c6b6c6760cefec5ea7749a5 | 2,648 |
def get_plugin():
"""Return the filter."""
return TextFilter | b0d43cab9c3b887fd9735ecfdc5372a8e2aefb49 | 2,649 |
import time
def caltech256(root):
"""Caltech256 dataset from http://www.vision.caltech.edu/Image_Datasets/Caltech256
Pictures of objects belonging to 256 categories.
About 80 to 800 images per category.
Collected in September 2003 by Fei-Fei Li, Marco Andreetto,
and Marc 'Aurelio Ranzato.
The size of each image is roughly 300 x 200 pixels.
We have carefully clicked outlines of each object in these pictures,
these are included under the 'Annotations.tar'.
    There is also a matlab script to view the annotations, 'show_annotations.m'.
    Attention: if the directory `root/caltech256` already exists, the api will delete and recreate it.
Data storage directory:
root = `/user/.../mydata`
caltech256 data:
`root/caltech256/train/007.bat/xx.jpg`
`root/caltech256/train/010.beer-mug/xx.ipg`
`root/caltech256/train/064.elephant-101/xx.jpg`
Args:
root: str, Store the absolute path of the data directory.
example:if you want data path is `/user/.../mydata/caltech256`,
root should be `/user/.../mydata`.
Returns:
Store the absolute path of the data directory, is `root/caltech256`.
"""
start = time.time()
task_path = assert_dirs(root, 'caltech256', make_root_dir=False)
url = "http://www.vision.caltech.edu/Image_Datasets/Caltech256/256_ObjectCategories.tar"
rq.files(url, gfile.path_join(root, url.split('/')[-1]))
un_tar(gfile.path_join(root, url.split('/')[-1]), task_path)
gfile.rename(gfile.path_join(task_path, '256_ObjectCategories'), gfile.path_join(task_path, 'train'))
gfile.remove(gfile.path_join(root, '256_ObjectCategories.tar'))
print('caltech256 dataset download completed, run time %d min %.2f sec' %divmod((time.time()-start), 60))
return task_path | 972cec00a3360fe0ace5b1fb8165e45718c137c1 | 2,650 |
import cv2
import numpy as np
def draw__mask_with_edge(cv2_image: np.ndarray, edge_size: int = 10) -> np.ndarray:
"""
From a color image, get a black white image each instance separated by a border.
1. Change a color image to black white image.
2. Get edge image from `cv2_image`, then invert it to separate instance by a border.
3. Merge 1 and 2.
.. image:: https://i.imgur.com/YAHVVSl.png
:width: 2496px
:height: 1018px
:scale: 25%
:alt: mask_with_edge
:align: center
Parameters
----------
cv2_image : np.ndarray
BGR color Image
edge_size : int
Edge size, by default 10
Returns
-------
np.ndarray
Grayscale image each instance separated by a border.
Examples
--------
>>> cv2_img: np.ndarray = cv2.imread("...")
>>> edge_masked_image: np.ndarray = mask_with_edge(cv2_img, edge_size=10)
"""
img_edge = draw__edge_only(cv2_image, edge_size)
not_img_edge = cv2.bitwise_not(img_edge)
bw_image = img_color_to_bw(cv2_image)
return mask_image(bw_image, mask_image=not_img_edge) | 50a25b60fdfa83f8cd1ec707f4c0e63b3c621695 | 2,651 |
import pdbparse
def get_functions(pdb_file):
"""Get the offset for the functions we are interested in"""
methods = {'ssl3_new': 0,
'ssl3_free': 0,
'ssl3_connect': 0,
'ssl3_read_app_data': 0,
'ssl3_write_app_data': 0}
try:
# Do this the hard way to avoid having to load
# the types stream in mammoth PDB files
pdb = pdbparse.parse(pdb_file, fast_load=True)
pdb.STREAM_DBI.load()
pdb._update_names()
pdb.STREAM_GSYM = pdb.STREAM_GSYM.reload()
if pdb.STREAM_GSYM.size:
pdb.STREAM_GSYM.load()
pdb.STREAM_SECT_HDR = pdb.STREAM_SECT_HDR.reload()
pdb.STREAM_SECT_HDR.load()
# These are the dicey ones
pdb.STREAM_OMAP_FROM_SRC = pdb.STREAM_OMAP_FROM_SRC.reload()
pdb.STREAM_OMAP_FROM_SRC.load()
pdb.STREAM_SECT_HDR_ORIG = pdb.STREAM_SECT_HDR_ORIG.reload()
pdb.STREAM_SECT_HDR_ORIG.load()
except AttributeError:
pass
try:
sects = pdb.STREAM_SECT_HDR_ORIG.sections
omap = pdb.STREAM_OMAP_FROM_SRC
except AttributeError:
sects = pdb.STREAM_SECT_HDR.sections
omap = DummyOmap()
gsyms = pdb.STREAM_GSYM
if not hasattr(gsyms, 'globals'):
gsyms.globals = []
#names = []
for sym in gsyms.globals:
try:
name = sym.name.lstrip('_').strip()
if name.startswith('?'):
end = name.find('@')
if end >= 0:
name = name[1:end]
#names.append(name)
if name in methods:
off = sym.offset
virt_base = sects[sym.segment-1].VirtualAddress
addr = omap.remap(off+virt_base)
if methods[name] == 0:
methods[name] = addr
else:
methods[name] = -1
except IndexError:
pass
except AttributeError:
pass
#with open('names.txt', 'wb') as f_out:
# for name in names:
# f_out.write(name + "\n")
return methods | e2a36d3799004c1f96d5bccb3c4f0a8ad3ce2607 | 2,652 |
import typing
def empty_iterable() -> typing.Iterable:
"""
Return an empty iterable, i.e., an empty list.
:return: an iterable
:Example:
>>> from flpy.iterators import empty_iterable
>>> empty_iterable()
[]
"""
return list() | 904fe365abf94f790f962c9a49f275a6068be4f0 | 2,653 |
import math as M
def nearest_pow_2(x):
"""
Finds the nearest integer that is a power of 2.
In contrast to :func:`next_pow_2` also searches for numbers smaller than
the input and returns them if they are closer than the next bigger power
of 2.
"""
a = M.pow(2, M.ceil(M.log(x, 2)))
b = M.pow(2, M.floor(M.log(x, 2)))
if abs(a - x) < abs(b - x):
return int(a)
else:
return int(b) | c9dba6f38badcedee02f7071fc5fcf82519dbdcb | 2,654 |
import pandas as pd
def timestamp_preprocess(ds, column, name):
    """This function takes the timestamp in the dataset and creates features from it according to the settings above
Args:
ds ([dataframe]): dataset
column ([integer]): column index
name ([string]): column name
Returns:
[dataframe]: dataset after transformation
"""
ts = pd.to_datetime(ds[name])
for feature in TIMESTAMP_FEATURES.keys():
if TIMESTAMP_FEATURES[feature] is not None:
if feature == "timestamp":
ds[feature] = ts
elif feature == "day_of_week":
ds[feature] = ts.apply(lambda X: X.day_of_week)
elif feature == "day_of_month":
ds[feature] = ts.apply(lambda X: X.day)
elif feature == "month":
ds[feature] = ts.apply(lambda X: X.month)
elif feature == "hour":
ds[feature] = ts.apply(lambda X: X.hour)
elif feature == "minute":
ds[feature] = ts.apply(lambda X: X.minute)
elif feature == "year":
ds[feature] = ts.apply(lambda X: X.year)
return ds | 18203f8e9a016d3302d5fe06d498d68403eb5805 | 2,655 |
import numpy as np
def make_taubin_loss_function(x, y):
"""closure around taubin_loss_function to make
    surviving pixel positions available inside.
x, y: positions of pixels surviving the cleaning
should not be quantities
"""
def taubin_loss_function(xc, yc, r):
"""taubin fit formula
reference : Barcelona_Muons_TPA_final.pdf (slide 6)
"""
upper_term = (((x - xc) ** 2 + (y - yc) ** 2 - r ** 2) ** 2).sum()
lower_term = (((x - xc) ** 2 + (y - yc) ** 2)).sum()
return np.abs(upper_term) / np.abs(lower_term)
return taubin_loss_function | b11aae3586cb387a6e280f5b0e985dcf6364306e | 2,656 |
from sklearn.ensemble import RandomForestClassifier
def init_rf_estimator():
"""
Instantiate a Random forest estimator with the optimized hyper-parameters.
:return: The RandomForest estimator instance.
"""
rf = RandomForestClassifier(
criterion=RF_CRIT,
min_samples_leaf=RF_MIN_SAMPLES_LEAF,
max_features='auto',
n_estimators=RF_N_ESTS,
n_jobs=-1)
return rf | 1171b5582869151823da29c61545c857e04ffed6 | 2,657 |
def dict_filter(d, exclude=()):
"""
Exclude specified keys from a nested dict
"""
def fix_key(k):
return str(k) if isinstance(k, builtin_str) else k
if isinstance(d, list):
return [dict_filter(e, exclude) for e in d]
if isinstance(d, dict):
items = ((fix_key(k), v) for k, v in d.items())
return {
k: dict_filter(v, exclude) for k, v in items if k not in exclude
}
return d | afa87c730fd105741a3bf95601d682fa817b903d | 2,658 |
async def mongoengine_multiple_objects_exception_handler(request, exc):
"""
Error handler for MultipleObjectsReturned.
Logs the MultipleObjectsReturned error detected and returns the
appropriate message and details of the error.
"""
logger.exception(exc)
return JSONResponse(
Response(success=False, error_code=422, message=str(exc)).dict()
) | c0e3d8d25ee02b9240cbf02f532cb853cbc693ee | 2,659 |
def _get_sample_times(*traces, **kwargs):
"""Get sample times for all the traces."""
# Set the time boundaries for the DataFrame.
max_stop_time = max(
[trace.stop_time() for trace in traces if isinstance(trace, Trace)]
)
stop_time = kwargs.pop("stop_time", max_stop_time)
min_start_time = min(
[trace.start_time() for trace in traces if isinstance(trace, Trace)]
)
start_time = kwargs.pop("start_time", min_start_time)
# Get all the sample times of all the traces between the start and stop times.
times = set([start_time, stop_time])
for trace in traces:
times.update(
set(trace.get_sample_times(start_time=start_time, stop_time=stop_time))
)
# If requested, fill in additional times between sample times.
step = kwargs.pop("step", 0)
if step:
times.update(set(range(start_time, stop_time + 1, step)))
# Sort sample times in increasing order.
times = sorted(list(times))
return times | 3e20bed62017e8306b3489ec41b7f6cd59a4c916 | 2,660 |
import math
def get_weak_model(op, diff_type, nonzero2nonzero_weight, zero2zero_weight=0,
zero2nonzero_weight=math.inf, nonzero2zero_weight=math.inf, precision=0):
"""Return the weak model of the given bit-vector operation ``op``.
Given the `Operation` ``op``, return the
`WeakModel` of ``op`` for the `Difference` type ``diff_type``
with given class attributes ``nonzero2nonzero_weight``,
``zero2zero_weight``,
``zero2nonzero_weight``, ``nonzero2zero_weight`` and
``precision`` (see `WeakModel`).
The returned model is a subclass of `WeakModel` and `OpModel`.
.. note::
To link the returned model ``MyModel`` to ``op``
such that ``MyModel`` is used in ``propagate``,
set the ``xor_model`` or ``rx_model`` attribute of ``op``
to ``MyModel`` (e.g., ``op.xor_model = MyModel``).
See also `differential.difference.XorDiff.propagate`
or `differential.difference.RXDiff.propagate`.
::
>>> from cascada.bitvector.core import Constant, Variable
>>> from cascada.bitvector.secondaryop import LutOperation
>>> from cascada.differential.difference import XorDiff
>>> from cascada.differential.opmodel import get_weak_model
>>> class MyLut(LutOperation): pass # a 2-bit function
>>> XorWeakModelMyLut = get_weak_model(MyLut, XorDiff, decimal.Decimal(1.5), precision=1)
>>> alpha, beta = XorDiff(Variable("a", 2)), XorDiff(Variable("b", 2))
>>> f = XorWeakModelMyLut(alpha)
>>> print(f.vrepr())
XorWeakModelMyLut(XorDiff(Variable('a', width=2)))
>>> f.validity_constraint(beta)
(((a == 0b00) & (b == 0b00)) == 0b1) | ((~(a == 0b00) & ~(b == 0b00)) == 0b1)
>>> f.bv_weight(beta)
Ite(((a == 0b00) & (b == 0b00)) == 0b1, 0b00, 0b11)
>>> f.max_weight(), f.weight_width(), f.error(), f.num_frac_bits()
(3, 2, 0, 1)
"""
assert issubclass(op, operation.Operation)
if diff_type == difference.XorDiff:
prefix = "Xor"
assert zero2zero_weight == 0
# for XOR differentials with Pr. 1, an input property propagates to a unique output property
assert zero2nonzero_weight == math.inf
elif diff_type == difference.RXDiff:
prefix = "RX"
else:
raise ValueError(f"invalid diff_type {diff_type}")
_op, _diff_type = op, diff_type
_zero2zero_weight = zero2zero_weight
_nonzero2nonzero_weight = nonzero2nonzero_weight
_zero2nonzero_weight, _nonzero2zero_weight = zero2nonzero_weight, nonzero2zero_weight
_precision = precision
class MyWeakModel(abstractproperty.opmodel.WeakModel, OpModel):
op, diff_type = _op, _diff_type
zero2zero_weight = _zero2zero_weight
nonzero2nonzero_weight = _nonzero2nonzero_weight
zero2nonzero_weight = _zero2nonzero_weight
nonzero2zero_weight = _nonzero2zero_weight
precision = _precision
# def error(self): # maximum weight of a differential with n-bit input is n
# return sum(p.val.width for p in self.input_prop)
MyWeakModel.__name__ = f"{prefix}{abstractproperty.opmodel.WeakModel.__name__}{op.__name__}"
return MyWeakModel | be34db3112ff7788bb96e6d6cc467d4d98d8af51 | 2,661 |
def get_temp():
"""
    Return the temperature that was read.
"""
return sensor.t | a4c7ed616af202599581cd47be87cb10ea571947 | 2,662 |
import pandas as pd
def load_clean_yield_data(yield_data_filepath):
"""
Cleans the yield data by making sure any Nan values in the columns we care about
are removed
"""
important_columns = ["Year", "State ANSI", "County ANSI", "Value"]
yield_data = pd.read_csv(yield_data_filepath).dropna(
subset=important_columns, how="any"
)
return yield_data | 14c5facc947d1ff8bcc7714447e9da3b7842bcee | 2,663 |
from collections import defaultdict
def create_element_mapping(repnames_bedfile):
"""Create a mapping of the element names to their classes and families"""
elem_key = defaultdict(lambda : defaultdict(str))
with open(repnames_bedfile, "r") as bed:
for line in bed:
l = line.strip().split("\t")
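            # columns 4-6 of the BED line hold the element name, class and family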
name = l[3]
class_ = l[4]
family = l[5]
elem_key[name]["class"] = class_
elem_key[name]["family"] = family
return elem_key | d3bc0491625d318b8f049c71a10571c21caf03d8 | 2,664 |
def _get_CRABI_iterators(captcha_dataframe,
train_indices,
validation_indices,
batch_size,
image_height,
image_width,
character_length,
categories):
"""
(HELPER FUNCTION)
Args:
captcha_dataframe (pandas.DataFrame): the dataset for training
train_indices (numpy.ndarray): indices of the CAPTCHA dataset used for training data
validation_indices (numpy.ndarray): indices of the CAPTCHA dataset used for validation data
batch_size (int): number of samples to process before the model is updated
image_height (int): height (in pixels) of expected input CAPTCHA image
image_width (int): width (in pixels) of expected input CAPTCHA image
character_length (int): number of characters in expected input CAPTCHA image
categories (int): number of possible characters in expected input
CAPTCHA image, specifying category count in the output layer
('10' for digits 0-9, '26' for alphabet, '36' for alphanumeric)
Returns:
pair of generator objects -> (training_set_iterator, validation_set_iterator)
"""
training_set_iterator = generate_CRABI_preprocessed_images(captcha_dataframe,
train_indices,
for_training=True,
batch_size=batch_size,
image_height=image_height,
image_width=image_width,
categories=categories)
validation_set_iterator = generate_CRABI_preprocessed_images(captcha_dataframe,
validation_indices,
for_training=True,
batch_size=batch_size,
image_height=image_height,
image_width=image_width,
categories=categories)
return training_set_iterator, validation_set_iterator | 7e01586f359860b5d1e461e9612b164e6cf9365f | 2,665 |
import uuid
def run(request, context):
"""Creates a template.
Args:
request (orchestrate_pb2.CreateTemplateRequest): Request payload.
context: Context.
Returns:
    An orchestrate_pb2.CreateTemplateResponse with the status of the request.
"""
template = request.template
print('Orchestrate.CreateTemplate name={name} project={project}'.format(
name=template.name,
project=template.project,
))
request_id = uuid.uuid4().hex
try:
# Make sure data is valid before creating individual sizes - don't want to
# clean-up half-way or leave incomplete template families.
for size in template.sizes:
validate_metadata(template, size)
# Data checks out. let's create all template sizes.
for size in template.sizes:
create_template_size(template, size)
return orchestrate_pb2.CreateTemplateResponse(
status='CREATED',
request_id=str(request_id),
)
except errors.HttpError as exception:
if exception.resp.status == 409:
message = 'A template with name {name} already exists.'.format(
name=template.name)
raise OrchestrateTemplateCreationError(message)
else:
raise | 484de4399b23bbc71e35ad70b054c1a62c41952e | 2,666 |
import numpy as np
from astropy.modeling import models
from astropy.modeling.fitting import LevMarLSQFitter
def fit_2dgaussian(data, error=None, mask=None):
"""
Fit a 2D Gaussian to a 2D image.
Parameters
----------
data : array_like
The 2D array of the image.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
result : `~astropy.modeling.functional_models.Gaussian2D` instance
The best-fitting Gaussian 2D model.
"""
if error is not None:
weights = 1.0 / error
else:
weights = None
if mask is not None:
if weights is None:
weights = np.ones_like(data)
# down-weight masked pixels
weights[mask] = 1.e-20
props = data_properties(data, mask=mask)
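    # initial guess: amplitude from the data range, centroid/widths from image moments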
init_amplitude = np.ptp(data)
g_init = models.Gaussian2D(
init_amplitude, props.xcentroid.value, props.ycentroid.value,
props.semimajor_axis_sigma.value, props.semiminor_axis_sigma.value,
props.orientation.value)
fitter = LevMarLSQFitter()
y, x = np.indices(data.shape)
gfit = fitter(g_init, x, y, data, weights=weights)
return gfit | 6ac3c7b7cba17baba719bd1d1fc87030f9c45dca | 2,667 |
from typing import List
from pathlib import Path
import os
def list_dir_files(path: str, suffix: str = "") -> List[str]:
"""
Lists all files (and only files) in a directory, or return [path] if path is a file itself.
:param path: Directory or a file
:param suffix: Optional suffix to match (case insensitive). Default is none.
:return: list of absolute paths to files
"""
if suffix:
suffix = suffix.lower()
if Path(path).is_file():
files = [os.path.abspath(path)]
else:
files = []
for f in os.listdir(path):
file_path = os.path.join(path, f)
if Path(file_path).is_file():
if not suffix or f.lower().endswith(suffix):
files.append(os.path.abspath(file_path))
return list(sorted(files)) | aaba7de5d5f67c5addc054010c5a2bd811475a3e | 2,668 |
import roman
def to_roman(number):
"""
Converts an arabic number within range from 1 to 4999 to the
corresponding roman number. Returns None on error conditions.
"""
try:
return roman.toRoman(number)
except (roman.NotIntegerError, roman.OutOfRangeError):
return None | 48fbe99caa527e711f8d0285577d96941a34b9c9 | 2,669 |
import numpy as np
from osgeo import gdal
def GDAL_like(filename, fileout=""):
"""
GDAL_like
"""
BSx, BSy, Mb, Nb, M, N = 0,0, 0,0, 0,0
dataset1 = gdal.Open(filename, gdal.GA_ReadOnly)
dataset2 = None
if dataset1:
band1 = dataset1.GetRasterBand(1)
M, N = int(dataset1.RasterYSize), int(dataset1.RasterXSize)
B = dataset1.RasterCount
BSx, BSy = band1.GetBlockSize()
Nb = int(N / BSx) + (0 if N % BSx == 0 else 1)
Mb = int(M / BSy) + (0 if M % BSy == 0 else 1)
CO = ["BIGTIFF=YES"]
options = dataset1.GetMetadata("IMAGE_STRUCTURE")
if BSy > 1:
CO += ["TILED=YES", "BLOCKXSIZE=%d" % BSx, "BLOCKYSIZE=%d" % BSy]
for key in options:
if key == "COMPRESSION":
CO.append("COMPRESS=" + options[key])
else:
CO.append(key + "=" + options[key])
driver = gdal.GetDriverByName("GTiff")
fileout = fileout if fileout else forceext(filename, "copy.tif")
dataset2 = driver.Create(fileout, N, M, B, band1.DataType, CO)
dataset2.SetProjection(dataset1.GetProjection())
dataset2.SetGeoTransform(dataset1.GetGeoTransform())
for j in range(1, B + 1):
band1 = dataset1.GetRasterBand(j)
band2 = dataset2.GetRasterBand(j)
if band1.GetNoDataValue() != None:
band2.SetNoDataValue(band1.GetNoDataValue())
else:
band2.SetNoDataValue(np.nan)
dataset1 = None
return (dataset2, BSx, BSy, Mb, Nb, M, N) | 34d4ea83a7c7e1726aa1d5a4d89e16bbed50cdd1 | 2,670 |
def take_attendance(methodcnt):
"""global setup_bool
if (setup_bool == False or methodcnt == False):
print ("in if statement")
setup_bool = True
else:"""
print ("checking in - F.R.")
react_with_sound(attendance_final)
client.CheckIn()
return 2 | 0ecdf80e59de5d968f7adc042d6be369367f4195 | 2,671 |
def feature_selection(data, features):
"""
Choose which features to use for training.
:param data: preprocessed dataset
:param features: list of features to use
:return: data with selected features
"""
return data[features] | 6303e52a9c64acfbb5dcfd115b07b3bef2942821 | 2,672 |
def parse_docstring(docstring, line=0, filename='<string>', logger=None,
format_name=None, options=None):
# type: (str, int, Any, Optional[logging.Logger], Optional[str], Any) -> Tuple[OrderedDict[str, Arg], Optional[Arg]]
"""
Parse the passed docstring.
The OrderedDict holding parsed parameters may be sparse.
Parameters
----------
docstring : str
line : int
start line of the docstring
logger : Optional[logging.Logger]
format_name : Optional[str]
Returns
-------
params : OrderedDict[str, Arg]
results : Optional[Arg]
"""
if format_name is None or format_name == 'auto':
format_cls = guess_format(docstring)
if format_cls is None:
format_cls = RestFormat
else:
format_cls = format_map[format_name]
format = format_cls(line, filename=filename, logger=logger,
options=options)
return format.parse(docstring) | 47cd0318f24ec1a26233ad6e98a398a4c9e95db6 | 2,673 |
def srCyrillicToLatin(cyrillic_text):
"""
Return a conversion of the given string from cyrillic to latin, using
'digraph' letters (this means that e.g. "nj" is encoded as one character). Unknown
letters remain unchanged.
CAVEAT: this will ONLY change letters from the cyrillic subset of Unicode.
For instance, the plain ASCII letter "C" (code point 0x0043) will NOT be converted
to "S", as opposed to the cyrillic letter "C" (code point 0x0421), which WILL be converted.
If you are sure that your cyrillic string does not contain latin portions (e.g. quoted text,
company names), you can "normalize" it to cyrillic by using srNormalizeToCyrillic first.
"""
return __translate_string(cyrillic_text, __cyrillic_to_latin) | cd4850b6c0bcf9b27aa1340dc98956c026e8f557 | 2,674 |
def from_phone(func=None):
"""来自手机的消息(给自己发的) FriendMsg"""
if func is None:
return from_phone
async def inner(ctx):
assert isinstance(ctx, FriendMsg)
if ctx.MsgType == MsgTypes.PhoneMsg:
return await func(ctx)
return None
return inner | 8e47e82e014d3d727a615e310997cd2c634ae821 | 2,675 |
from typing import Iterator
from typing import Tuple
from typing import List
from typing import Callable
import torch
from torch import Tensor
import sys
def fit_and_validate_readout(data: Iterator[Tuple[Tensor, Tensor]], regularization_constants: List[float],
get_validation_error: Callable[[Tuple[Tensor, Tensor]], float],
verbose: bool = False) -> Tuple[Tensor, Tensor]:
"""
Ridge regression for big data, with efficient regularization selection
Fits a linear model :math:`y = W x + b` with regularization.
See:
T. Zhang & B. Yang (2017). An exact approach to ridge regression for big data.
Computational Statistics, 32(3), 909–928. https://doi.org/10.1007/s00180-017-0731-5
:param data: Batch dataset of pairs (x, y) with samples on rows
:param regularization_constants: Regularization constants for ridge regression (including none)
:param get_validation_error: Evaluate validation error for a regression pair (W, b)
:param verbose: Whether to print validation info (default false)
:return: A pair of tensors (W, b)
"""
# Compute sufficient statistics for regression
x, y = next(data)
Syy = y.square().sum(dim=0) # (targets)
Sxy = x.t() @ y # (features × targets)
Sxx = x.t() @ x # (features × features)
Sy = y.sum(dim=0) # (targets)
Sx = x.sum(dim=0) # (features)
n = float(x.shape[0]) # samples
for x, y in data:
Syy += y.square().sum(dim=0)
Sxy += x.t() @ y
Sxx += x.t() @ x
Sy += y.sum(dim=0)
Sx += x.sum(dim=0)
n += x.shape[0]
# Compute ridge matrices
Vxx = Sxx.diag() - (Sx.square() / n)
Vyy = Syy - (Sy.square() / n)
XX = (Sxx - torch.outer(Sx, Sx) / n) / torch.outer(Vxx, Vxx).sqrt()
Xy = (Sxy - torch.outer(Sx, Sy) / n) / torch.outer(Vxx, Vyy).sqrt()
# Compute and select weights
best_validation_error, best_W, best_b = None, None, None
for regularization in regularization_constants:
# Compute weights
XXr = (XX + torch.eye(n=XX.shape[0]).to(XX) * regularization) if regularization else XX
Ws = torch.linalg.solve(XXr, Xy)
W = Ws * torch.sqrt(Vyy.expand_as(Ws) / Vxx.unsqueeze(-1))
b = (Sy / n) - (Sx / n) @ W
# Validate, select
validation_error = get_validation_error((W.t(), b))
if best_validation_error is None or validation_error < best_validation_error:
best_validation_error, best_W, best_b = validation_error, W.t(), b
if verbose:
print(f'{regularization:e}: {validation_error}', file=sys.stderr)
return best_W, best_b | d179e70aa53da0ff38ede9bbbf3fbe58b81c2886 | 2,676 |
import pathlib
def create_scan_message():
"""Creates a dummy message of type v3.asset.file to be used by the agent for testing purposes.
The files used is the EICAR Anti-Virus Test File.
"""
file_content = (pathlib.Path(__file__).parents[0] / 'files/malicious_dummy.com').read_bytes()
selector = 'v3.asset.file'
msg_data = {'content': file_content, 'path': 'some/dummy/path'}
return message.Message.from_data(selector, data=msg_data) | e899e705fc022046876dd2a1584e7db74c4b7105 | 2,677 |
import numpy as np
def is_permutation_matrix( m ):
"""
Test whether a numpy array is a `permutation matrix`_.
.. _permutation_matrix: https://en.wikipedia.org/wiki/Permutation_matrix
Args:
m (mp.matrix): The matrix.
Returns:
(bool): True | False.
"""
m = np.asanyarray(m)
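    # square, 0/1-valued, with every row and column summing to exactly 1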
return (m.ndim == 2 and m.shape[0] == m.shape[1] and
(m.sum(axis=0) == 1).all() and
(m.sum(axis=1) == 1).all() and
((m == 1) | (m == 0)).all()) | 7cfe48fd0cd36c4ff151ebe248c79e685ee99cc8 | 2,678 |
def create_security_role(connection, body, error_msg=None):
"""Create a new security role.
Args:
connection: MicroStrategy REST API connection object
body: JSON-formatted definition of the dataset. Generated by
`utils.formjson()`.
error_msg (string, optional): Custom Error Message for Error Handling
Returns:
Complete HTTP response object.
"""
return connection.session.post(
url=f'{connection.base_url}/api/securityRoles',
headers={'X-MSTR-ProjectID': None},
json=body,
) | fbae3596e0cdcc430b2a7a30fc9ed594f3717ba3 | 2,679 |
import scipy.constants as sc
def dbm_to_w(dbm):
"""Convert dBm to W."""
return 10 ** (dbm / 10.) * sc.milli | b6b782f35a3a07a2f372958363609b3b0f00a43a | 2,680 |
import numpy as np
from numpy.linalg import inv
def lml(alpha, beta, Phi, Y):
"""
4 marks
:param alpha: float
:param beta: float
:param Phi: array of shape (N, M)
:param Y: array of shape (N, 1)
:return: the log marginal likelihood, a scalar
"""
N = len(Phi)
M = len(Phi[0])
part1 = (-N*0.5)*np.log(2*np.pi)
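    # marginal covariance: K = alpha * Phi @ Phi.T + beta * I_N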
wholePhi = np.dot(np.dot(Phi, alpha*np.identity(M)), Phi.T)
wholeBeta = beta*np.identity(N)
part2 = - 0.5*np.log(np.linalg.det(wholePhi + wholeBeta))
part3 = -0.5*np.dot(np.dot(Y.T, inv((wholePhi + wholeBeta))), Y)
logFunc = part1 + part2 + part3
return logFunc[0][0] | a6d17ed0f6c81958360687d5758cd8a35147dd56 | 2,681 |
from typing import Callable
from typing import Any
import os
def convert_env_var(var_name: str, *, cast_type: Callable[..., Any] = float, default: Any = None) -> Any:
"""
Attempts to read an environment variable value and cast it to a type. For example it permits
getting numeric value(s) from os.environ
:param var_name: Key to lookup from environment variables.
:param cast_type: The callable instance to run the env string through if exists.
:param default: Default value to return if the specified var_name does not exist in os.environ
"""
try:
return cast_type(os.environ.get(var_name, default))
except (TypeError, ValueError):
raise EnvironFetchException(f"Unable to cast to: {type(cast_type)}") | 8b01cdca21f32aad2471946c39a2fc5962e316ef | 2,682 |
def balance_set(X, Y, adr_labels_size, nonadr_labels_size):
"""balances the set by doing up- and down -sampling to converge into the same class size
# Arguments
X - set samples
Y - set labels
adr_labels_size - ADR_MENTION_CLASS size
nonadr_labels_size - NON_ADR_MENTION_CLASS size
# Returns
new_X - new balanced samples
new_Y - new labels corresponding to new_X
"""
print("Performing Class Balancing...")
adr_samples_needed = nonadr_labels_size - adr_labels_size
new_X = []
new_Y = []
adr_labels_size = 0
nonadr_labels_size = 0
for index, example in enumerate(X):
if adr_samples_needed > 0:
if Y[index] == ADR_MENTION_CLASS_LABEL:
new_X.append(example) # add original 'ADR' sample
new_Y.append(ADR_MENTION_CLASS_LABEL)
new_X.append(example) # add duplicate 'ADR' sample to perform Over-Sampling
new_Y.append(ADR_MENTION_CLASS_LABEL)
adr_labels_size += 2
adr_samples_needed -= 1
else:
# we don't add original 'No ADR Mention' sample to perform Under-Sampling
adr_samples_needed -= 1
else:
if Y[index] == ADR_MENTION_CLASS_LABEL:
adr_labels_size += 1
else:
nonadr_labels_size += 1
new_X.append(example) # add original sample
new_Y.append(Y[index]) # add original label
print(" Updated dataset size: {}".format(len(new_X)))
print(" {} class size: {}".format(ADR_MENTION_CLASS_NAME, adr_labels_size))
print(" {} class size: {}".format(NON_ADR_MENTION_CLASS_NAME, nonadr_labels_size))
return new_X, new_Y | e73468dd600a9d6f9b13a46356110d35fba8ce59 | 2,683 |
from pathlib import Path
def load_det_lcia(result_dir, method, act_code, det_lcia_dict=None):
"""Return precalculated deterministic LCIA score"""
result_dir = Path(_check_result_dir(result_dir))
method = _check_method(method)
if not det_lcia_dict:
det_lcia_dict = _get_det_lcia_dict(result_dir, method)
if not act_code in det_lcia_dict:
raise ValueError("No deterministic result for activity with code {} "
"in deterministic LCIA dictionary".format(
act_code
))
return det_lcia_dict[act_code] | c9ba6532f674bcbe988cdc645b7dd86a93ed27e5 | 2,684 |
import shapely.geometry
def get_geometry(location, geolevel):
"""
Get geometry of a single location code/name
"""
if not utils.is_number(location) and location != "BR":
assert geolevel, "You need to specify which geographic level this location is"
location = ibgetools.ibge_encode(location, geolevel)
if location == -1:
return shapely.geometry.Polygon([])
url = build_url(location)
geojson = get_geojson(url)
features = utils.get_features(geojson)
return shapely.geometry.shape(features[0]["geometry"]) | da53cfe7845c7adffbcbd941dc3f0b62bdb15e2f | 2,685 |
def render_to_string(template, context={}, processors=None):
"""
A function for template rendering adding useful variables to context
automatically, according to the CONTEXT_PROCESSORS settings.
"""
if processors is None:
processors = ()
else:
processors = tuple(processors)
for processor in get_standard_processors() + processors:
context.update(processor(get_request()))
template = local.app.jinja2_env.get_template(template)
return template.render(context) | 678eab60113a05fba86591ee7bb47e26ecfb0b37 | 2,686 |
def find_node_names(structure):
""" Return the names of the nodes for the structure """
# Look through all of the items in the structure for names
# Check through each of the lists and sub-lists
names=set()
for i in xrange(len(structure)):
if isinstance(structure[i],basestring):
# do not return joins
if not structure[i] in [AND_DELIMITER, OR_DELIMITER, " "]:
names.add(structure[i])
elif isinstance(structure[i], list):
names.update(find_node_names(structure[i]))
return names | 812194e2d8dbd34741e9f03a6c775bb30f551341 | 2,687 |
import os
def run_calcs(run_id, year, no_ef_countries, export_data=True, include_TD_losses=True, BEV_lifetime=180000, ICEV_lifetime=180000, flowtrace_el=True, allocation=True, production_el_intensity=679, incl_ei=False, energy_sens=False):
"""Run all electricity mix and vehicle calculations and exports results."""
# Korean el-mix 679 g CO2/kWh, from ecoinvent
fp = os.path.curdir
production, trades, trade_ef, country_total_prod_disagg, country_total_cons_disagg, g_raw, C = load_prep_el_data(fp, year)
codecheck_file, elmixes, trade_only, country_el, CFEL, CFCI = el_calcs(flowtrace_el, run_id, fp, C, production, country_total_prod_disagg, country_total_cons_disagg, g_raw, trades, trade_ef, include_TD_losses, incl_ei, export_data) # Leontief electricity calculations
results_toSI, ICEV_total_impacts, ICEV_prodEOL_impacts, ICEV_op_int = BEV_calcs(fp, country_el, production, elmixes, BEV_lifetime, ICEV_lifetime, production_el_intensity, CFCI, allocation, energy_sens)
SI_fp = export_SI(run_id, results_toSI, production, trades, C, CFEL, no_ef_countries)
pickle_results(run_id, results_toSI, CFEL, ICEV_total_impacts, codecheck_file, export_data)
return results_toSI['BEV footprint'].xs('Consumption mix', level=1, axis=1), ICEV_prodEOL_impacts, ICEV_op_int, SI_fp | 7bb80133ec3ee684c8db229f9f41940c994c3634 | 2,688 |
def handle_question():
"""Save response and redirect to next question."""
# get the response choice
choice = request.form['answer']
# add this response to the session
responses = session[RESPONSES_KEY]
responses.append(choice)
session[RESPONSES_KEY] = responses
if (len(responses) == len(survey.questions)):
# They've answered all the questions! Thank them.
return redirect("/complete")
else:
return redirect(f"/questions/{len(responses)}") | 184dc816303f48e134320f602126d381ee820b59 | 2,689 |
from typing import Callable
def makeNotePlayer(seq: Sequencer, out: PortInfo
) -> Callable[[int, bool], None]:
"""Returns a callable object that plays midi notes on a port."""
def playNote(note: int, enabled: bool) -> None:
if enabled:
seq.sendEvent(NoteOn(0, 0, note, 127), out)
else:
seq.sendEvent(NoteOff(0, 0, note, 0), out)
return playNote | 7cb9741944f6f71fbfd55b825c2c7e4638bfa317 | 2,690 |
def se_resnet152(**kwargs):
"""TODO: Add Doc"""
return _resnet("se_resnet152", **kwargs) | 52dd9fa145f6216519282633aa54e6e17802aaa9 | 2,691 |
import base64
def file_to_attachment(filename):
"""
Convert a file to attachment
"""
with open(filename, 'rb') as _file:
return {'_name':filename,
'content':base64.b64encode(_file.read())
} | 9b64fe8a4329eae000cd76d58450c32644a736f6 | 2,692 |
import numpy as np
import matplotlib.pyplot as plt
def ratio_selection(
strain_lst,
ratio_lst,
pressure_lst,
temperature_lst,
ratio_boundary,
debug_plot=True,
):
"""
Args:
strain_lst:
ratio_lst:
pressure_lst:
temperature_lst:
ratio_boundary:
debug_plot:
Returns:
"""
if debug_plot:
plt.plot(strain_lst, ratio_lst)
plt.axhline(0.5 + ratio_boundary, color="red", linestyle="--")
plt.axhline(0.5, color="black", linestyle="--")
plt.axhline(0.5 - ratio_boundary, color="red", linestyle="--")
plt.xlabel("Strain")
plt.ylabel("ratio solid vs. liquid")
rat_lst, rat_col_lst = [], []
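    # collect consecutive runs of ratios inside the band 0.5 +/- ratio_boundary; the longest run is selected below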
for rat in ratio_lst:
if (0.5 - ratio_boundary) < rat < (0.5 + ratio_boundary):
rat_lst.append(rat)
elif len(rat_lst) != 0:
rat_col_lst.append(rat_lst)
rat_lst = []
if len(rat_lst) != 0:
rat_col_lst.append(rat_lst)
if len(rat_col_lst) != 0:
rat_max_ind = np.argmax([len(lst) for lst in rat_col_lst])
ratio_ind = [r in rat_col_lst[rat_max_ind] for r in ratio_lst]
strain_value_lst = np.array(strain_lst)[ratio_ind]
ratio_value_lst = np.array(ratio_lst)[ratio_ind]
pressure_value_lst = np.array(pressure_lst)[ratio_ind]
temperature_value_lst = np.array(temperature_lst)[ratio_ind]
if debug_plot:
plt.axvline(np.min(strain_value_lst), color="blue", linestyle="--")
plt.axvline(np.max(strain_value_lst), color="blue", linestyle="--")
plt.show()
if np.mean(ratio_value_lst) > 0.5:
return (
strain_value_lst,
ratio_value_lst,
pressure_value_lst,
temperature_value_lst,
1,
)
else:
return (
strain_value_lst,
ratio_value_lst,
pressure_value_lst,
temperature_value_lst,
-1,
)
else:
if np.mean(ratio_lst) > 0.5:
return [], [], [], [], 1
else:
return [], [], [], [], -1 | f4260649c50b33d9ee818ebdc0469d693720937d | 2,693 |
import numpy as np
def diff_mean(rolling_window, axis=-1):
"""For M5 purposes, used on an object generated by the
rolling_window function. Returns the mean of the first
difference of a window of sales."""
return np.diff(rolling_window, axis=axis).mean(axis=axis) | 85294f16c89658eaca9562e1ff4652d5865a5a59 | 2,694 |
import numpy
def noiseFraction(truth_h5, measured_h5, tolerance):
"""
Return the fraction of measured localizations that are greater than
tolerance pixels from the nearest truth localization.
Note: This will return 0 if there are no measured localizations.
truth_h5 - A saH5Py.SAH5Py object with the ground truth localizations.
measured_h5 - A saH5Py.SAH5Py object with the found localizations.
tolerance - The search radius in pixels.
"""
if (measured_h5.getNLocalizations() == 0):
return [0, truth_h5.getNLocalizations()]
noise_locs = 0
total_locs = 0
for i in range(truth_h5.getMovieLength()):
t_locs = truth_h5.getLocalizationsInFrame(i)
m_locs = measured_h5.getLocalizationsInFrame(i)
if bool(t_locs) and bool(m_locs):
dist = iaUtilsC.peakToPeakDistAndIndex(t_locs['x'], t_locs['y'],
m_locs['x'], m_locs['y'],
max_distance = tolerance)[0]
noise_locs += numpy.count_nonzero((dist < 0.0))
total_locs += dist.size
elif bool(t_locs):
total_locs += t_locs['x'].size
return [noise_locs, total_locs] | 282e8c835906cf218e6eb1ef94cbb595419419f5 | 2,695 |
import os
def prepare(compute: dict, script_id: str):
"""Prepare the script
:param compute: The instance to be attacked.
:param script_id: The script's filename without the filename ending. Is named after the activity name.
:return: A tuple of the Command Id and the script content
"""
os_type = __get_os_type(compute)
if os_type == OS_LINUX:
command_id = 'RunShellScript'
script_name = "{}.sh".format(script_id)
else:
if script_id in UNSUPPORTED_WINDOWS_SCRIPTS:
raise InterruptExecution("'{}' is not supported for os '{}'"
.format(script_id, OS_WINDOWS))
command_id = 'RunPowerShellScript'
script_name = "{}.ps1".format(script_id)
file_path = os.path.join(os.path.dirname(__file__), "../scripts", script_name)
with open(file_path) as file_path:
script_content = file_path.read()
return command_id, script_content | 547662decdc541ba398a273a91280ce9b60b2006 | 2,696 |
import tensorflow as tf
def compute_rigid_flow(depth, pose, intrinsics, reverse_pose=False):
"""Compute the rigid flow from target image plane to source image
Args:
depth: depth map of the target image [batch, height_t, width_t]
pose: target to source (or source to target if reverse_pose=True)
camera transformation matrix [batch, 6], in the order of
tx, ty, tz, rx, ry, rz;
intrinsics: camera intrinsics [batch, 3, 3]
Returns:
Rigid flow from target image to source image [batch, height_t, width_t, 2]
"""
with tf.variable_scope('compute_rigid_flow'):
batch, height, width = depth.get_shape().as_list()
# Convert pose vector to matrix
pose = pose_vec2mat(pose)
if reverse_pose:
pose = tf.matrix_inverse(pose)
# Construct pixel grid coordinates
pixel_coords = meshgrid(batch, height, width)
tgt_pixel_coords = tf.transpose(pixel_coords[:,:2,:,:], [0, 2, 3, 1])
# Convert pixel coordinates to the camera frame
cam_coords = pixel2cam(depth, pixel_coords, intrinsics)
# Construct a 4x4 intrinsic matrix
filler = tf.constant([0.0, 0.0, 0.0, 1.0], shape=[1, 1, 4])
filler = tf.tile(filler, [batch, 1, 1])
intrinsics = tf.concat([intrinsics, tf.zeros([batch, 3, 1])], axis=2)
intrinsics = tf.concat([intrinsics, filler], axis=1)
# Get a 4x4 transformation matrix from 'target' camera frame to 'source'
# pixel frame.
proj_tgt_cam_to_src_pixel = tf.matmul(intrinsics, pose)
src_pixel_coords = cam2pixel(cam_coords, proj_tgt_cam_to_src_pixel)
rigid_flow = src_pixel_coords - tgt_pixel_coords
return rigid_flow | 5b01bfb9768bc1f180b06f599e71c4808c945854 | 2,697 |
def get_versions(script_name):
""" 返回指定名称脚本含有的所有版本。"""
versions = repository.get(script_name, None)
if not versions:
return None
return sorted(versions, reverse=True) | 4399c5531bbf0d10f750d64ce3a63e156d62ba1b | 2,698 |
import os
import flask
def data_static(filename):
"""
    Serve a file from the raw-data directory.
    :param filename: path of the requested file, relative to the data directory
    :return: the file as a Flask response
"""
_p, _f = os.path.split(filename)
print(_p, _f)
return flask.send_from_directory(os.path.join(
'/Users/dmitryduev/_caltech/python/deep-asteroids/data-raw/', _p), _f) | ebaf91e16fc3f0a83da47c61723c62a03533fa1c | 2,699 |