content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
import idwgopt.idwgopt_default as idwgopt_default
def default(nvars):
""" Generate default problem structure for IDW-RBF Global Optimization.
    problem=idwgopt.default(n) generates a default problem structure for
    an optimization problem with n variables.
(C) 2019 by A. Bemporad.
"""
problem = idwgopt_default.set(nvars)
return problem
|
6e865ffdab0b3913c793357b6cb2688a6cd4dc00
| 25,877 |
from functools import cmp_to_key
from point import Point
from line import Segment
from polygon import Polygon
def convex_hull(*args):
"""
Returns a Polygon representing the convex hull of a set of 2D points.
Notes:
======
This can only be performed on a set of non-symbolic points.
Example:
========
>>> from sympy.geometry import Point
>>> points = [ Point(x) for x in [(1,1), (1,2), (3,1), (-5,2), (15,4)] ]
>>> convex_hull(points)
Polygon(Point(3, 1), Point(15, 4), Point(-5, 2), Point(1, 1))
Description of method used:
===========================
See http://en.wikipedia.org/wiki/Graham_scan.
"""
p = args[0]
    if isinstance(p, Point):
        p = list(args)
# Basic checks
if len(p) == 1:
return p[0]
elif len(p) == 2:
return Segment(p[0], p[1])
# Find lowest+rightmost point
m = 0
    for i in range(1, len(p)):
if (p[i][1] < p[m][1]) or ((p[i][1] == p[m][1]) and (p[i][0] > p[m][0])):
m = i
p[0], p[m] = p[m], p[0]
def tarea(a, b, c):
return (b[0] - a[0])*(c[1] - a[1]) - (c[0] - a[0])*(b[1] - a[1])
# Radial sort of points with respect to p[0] (our pivot)
destroy = {}
p0 = p[0]
def pcompare(p1, p2):
a = tarea(p0, p1, p2)
if a > 0:
return -1
elif a < 0:
return 1
else:
x = abs(p1[0] - p0[0]) - abs(p2[0] - p0[0])
y = abs(p1[1] - p0[1]) - abs(p2[1] - p0[1])
if (x < 0) or (y < 0):
destroy[p1] = True
return -1
elif (x > 0) or (y > 0):
destroy[p2] = True
return 1
else:
destroy[p1] = True
return 0
    p = p[1:]
    p.sort(key=cmp_to_key(pcompare))
p.insert(0, p0)
# Destroy points as found by sorting
    for i in range(len(p)-1, -1, -1):
if p[i] in destroy:
del p[i]
# Graham scan
def isleft(a, b, c):
return (tarea(a, b, c) > 0)
top = [p[0], p[1]]
i = 2
while i < len(p):
p1 = top[-2]
p2 = top[-1]
if isleft(p1, p2, p[i]):
top.append(p[i])
i += 1
else:
top.pop()
return Polygon(top)
|
ee1c1fd65dfe849a36a6dfc8e86a4e1e2ee8ca69
| 25,878 |
from typing import Dict
from lxml.etree import _ElementTree as ElementTree  # assumption: lxml, per the docstring below
def find_namespaces(tree: ElementTree) -> Dict[str, str]:
"""
Finds the namespaces defined in the ElementTree of an XML document. It looks for namespaces
defined in the root element of the XML document. To avoid namespaces being left out, they shall
all be defined in the root element of an XML document, instead of being defined across the
document.
:param tree: An lxml ElementTree containing the XML document from which to extract the namespaces.
:return: A dictionary containing the mapping between short namespace and full namespace.
"""
root = tree.getroot()
namespaces = root.nsmap
try:
namespaces.pop(None)
except KeyError:
pass
return namespaces
|
8b2a523c9d7152280fa609563e94eda4facebe4b
| 25,879 |
def filterStories(stories, triggerlist):
"""
Takes in a list of NewsStory instances.
Returns: a list of only the stories for which a trigger in triggerlist fires.
"""
filteredStories = []
for story in stories:
for trig in triggerlist:
if trig.evaluate(story) and story not in filteredStories:
filteredStories.append(story)
return filteredStories
|
1fcf2592e22c97cd13919dbfe5b8a4acde682761
| 25,880 |
def lcs(a, b):
"""
Compute the length of the longest common subsequence between two sequences.
Time complexity: O(len(a) * len(b))
Space complexity: O(min(len(a), len(b)))
"""
# This is an adaptation of the standard LCS dynamic programming algorithm
# tweaked for lower memory consumption.
# Sequence a is laid out along the rows, b along the columns.
# Minimize number of columns to minimize required memory
if len(a) < len(b):
a, b = b, a
# Sequence b now has the minimum length
# Quit early if one sequence is empty
if len(b) == 0:
return 0
# Use a single buffer to store the counts for the current row, and
# overwrite it on each pass
row = [0] * len(b)
for ai in a:
left = 0
diag = 0
for j, bj in enumerate(b):
up = row[j]
if ai == bj:
value = diag + 1
else:
value = max(left, up)
row[j] = value
left = value
diag = up
# Return the last cell of the last row
return left
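# A quick sanity check (hypothetical usage, not part of the original snippet):
# the classic CLRS pair "ABCBDAB" / "BDCAB" shares "BCAB", so the LCS length is 4,
# and an empty sequence always gives 0.
assert lcs("ABCBDAB", "BDCAB") == 4
assert lcs("", "anything") == 0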
|
0201e9efade98aece854e05d0910192251e5f63c
| 25,881 |
def save(config, filename="image.img", host=None):
"""Save the Image File to the disk"""
cmd = DockerCommandBuilder(host=host).save(config.getImageName()).set_output(filename).build()
return execute(cmd)
|
628dca6307b6a5d975e90e08649f20790bc8b639
| 25,882 |
import arrow
import numpy as np
def lines2bars(lines, is_date):
    """Convert CSV records into Bar objects.
    header: date,open,high,low,close,money,volume,factor
    lines: 2022-02-10 10:06:00,16.87,16.89,16.87,16.88,4105065.000000,243200.000000,121.719130
    """
if isinstance(lines, str):
lines = [lines]
def parse_date(x):
return arrow.get(x).date()
def parse_naive(x):
return arrow.get(x).naive
if is_date:
convert = parse_date
else:
convert = parse_naive
data = []
for line in lines:
fields = line.split(",")
data.append(
(
convert(fields[0]),
float(fields[1]),
float(fields[2]),
float(fields[3]),
float(fields[4]),
float(fields[5]),
float(fields[6]),
float(fields[7]),
)
)
return np.array(data, dtype=bars_dtype)
|
4d2049d08f885de3b999b1537a48c03088f45da3
| 25,884 |
import torch
import torch.nn as nn
def pgd_linf_untargeted(model, X, y, epsilon=0.1, alpha=0.01, num_iter=20, randomize=False):
    """ Construct untargeted PGD (L-infinity) adversarial perturbations for the examples X"""
if randomize:
delta = torch.rand_like(X, requires_grad=True)
delta.data = delta.data * 2 * epsilon - epsilon
else:
delta = torch.zeros_like(X, requires_grad=True)
for t in range(num_iter):
loss = nn.CrossEntropyLoss()(model(X + delta), y)
loss.backward()
delta.data = (delta + alpha*delta.grad.detach().sign()).clamp(-epsilon,epsilon)
delta.grad.zero_()
return delta.detach()
|
b19091048d269853c6b55c4d96d5919c4efcfbe6
| 25,885 |
def cal_NB_pvalue (treatTotal,controlTotal,items):
"""calculate the pvalue in pos of chromosome.
"""
pvalue = 1
(treatCount,controlCount,pos)=items
pvalue = negativeBinomail(treatCount,treatTotal,controlCount,controlTotal)
return (pvalue,treatCount,controlCount,pos)
|
f68809ffb40949c2d4ca1486870ec421d48bbfb5
| 25,886 |
from bson import ObjectId
def handle_watches(connection, author):
"""Return an array of watches for the author."""
database = connection['test']
collection = database['watches']
watches = []
# this should not except
for post in collection.find({"author" : ObjectId(author)}):
watches.append(cleanup_watch(post))
return watches
|
bfb765e30d249fac30fdbf567006283be1808e6c
| 25,887 |
import matplotlib.pyplot as plt
def new_mm(*args, figsize, **kwargs):
"""Wrapper for plt.subplots, using figsize in millimeters
:rtype: figure, axes
"""
return plt.subplots(*args, figsize=(figsize[0] / 25.4, figsize[1] / 25.4), **kwargs)
|
7111f1fd8261d3367bff03fd36ed86cc26917fe8
| 25,888 |
import numpy as np
def compute_center_of_mass(coordinates, masses):
"""
Given coordinates and masses, return center of mass coordinates.
Also works to compute COM translational motion.
Args:
coordinates ({nparticle, ndim} ndarray): xyz (to compute COM) or velocities (COM velocity)
masses ({nparticle,} array_like): masses
Returns:
({ndim,} ndarray): center of mass coordinates
"""
coordinates_cp = np.array(coordinates)
mass_cp = np.reshape(masses, (-1, 1))
com_coordinates = np.sum(mass_cp * coordinates_cp, axis=0)/np.sum(mass_cp)
return com_coordinates
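# Worked example (hypothetical usage): two particles of mass 1 and 3 on the
# x-axis at x=0 and x=4 give a centre of mass at (1*0 + 3*4) / 4 = 3.
com = compute_center_of_mass([[0.0, 0.0, 0.0], [4.0, 0.0, 0.0]], [1.0, 3.0])
assert np.allclose(com, [3.0, 0.0, 0.0])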
|
d190c20930209e180524c07c8bf8fef9ab95734b
| 25,889 |
def chars_count(word: str):
"""
:param word: string to count the occurrences of a character symbol for.
:return: a dictionary mapping each character found in word to the number of times it appears in it.
"""
res = dict()
for c in word:
res[c] = res.get(c, 0) + 1
return res
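# Hypothetical usage: each character maps to its number of occurrences.
assert chars_count("banana") == {"b": 1, "a": 3, "n": 2}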
|
30c27b23c04909a65264247d068e9e2c695c6ecc
| 25,890 |
def do_expressiondelete(**kwargs):
"""
Worker to remove expression from engine
proexpobj: expression object
profileexplist: expression list object
    return 1 if the expression was deleted, 0 otherwise
"""
proexpobj = kwargs.get('proexpobj')
profileexplist = kwargs.get('profileexplist')
if profileexplist.delete(proexpobj.profile_expression_id):
return 1
else:
return 0
|
4d4f26aca34417026ac326d237f817b88afe525c
| 25,891 |
import csv
def read_csv_as_nested_dict(filename, keyfield, separator, quote):
"""
Inputs:
filename - name of CSV file
keyfield - field to use as key for rows
separator - character that separates fields
quote - character used to optionally quote fields
Output:
Returns a dictionary of dictionaries where the outer dictionary
maps the value in the key_field to the corresponding row in the
CSV file. The inner dictionaries map the field names to the
field values for that row.
"""
table = {}
with open(filename, newline='') as csvfile:
csvreader = csv.DictReader(csvfile, delimiter=separator, quotechar=quote)
for row in csvreader:
rowid = row[keyfield]
table[rowid] = row
return table
|
b86a19e531ac2d0c815839714ee93fbc618e911d
| 25,892 |
def msgpackb(lis):
"""list -> bytes"""
return create_msgpack(lis)
|
4e2667ff32c58be09620cd8360ff0207406a7871
| 25,893 |
from azureml._execution import _commands
from azureml.core.runconfig import RunConfiguration
from azureml._project.project import Project
def prepare_compute_target(experiment, source_directory, run_config):
"""Prepare the compute target.
Installs all the required packages for an experiment run based on run_config and custom_run_config.
:param experiment:
:type experiment: azureml.core.experiment.Experiment
:param source_directory:
:type source_directory: str
:param run_config: The run configuration. This can be a run configuration name, as string, or a
azureml.core.runconfig.RunConfiguration object.
:type run_config: str or azureml.core.runconfig.RunConfiguration
:return: A run object
:rtype: azureml.core.script_run.ScriptRun
"""
run_config_object = RunConfiguration._get_run_config_object(path=source_directory, run_config=run_config)
project_object = Project(experiment=experiment, directory=source_directory)
return _commands.prepare_compute_target(project_object, run_config_object)
|
d6a7f2f45483c2e0a42bcb03407791ca781318ab
| 25,894 |
import pandas as pd
def streaming_ndarray_agg(
in_stream,
ndarray_cols,
aggregate_cols,
value_cols=[],
sample_cols=[],
chunksize=30000,
add_count_col=False,
divide_by_count=False,
):
"""
Takes in_stream of dataframes
Applies ndarray-aware groupby-sum or groupby-mean: treats ndarray_cols as numpy arrays,
value_cols as normal values, for sample_cols takes the first element.
Does groupby over aggregate_cols
    If add_count_col is True, adds a column "count"; if it's a string, adds a column with that name.
    If divide_by_count is True, divides the result by the "count" column;
    if it's a string, divides by the column named in divide_by_count.
This function can be used for automatically aggregating P(s), R(s) etc.
for a set of conformations that is so large that all P(s) won't fit in RAM,
and when averaging needs to be done over so many parameters
that for-loops are not an issue. Examples may include simulations in which sweep
over many parameters has been performed.
"""
value_cols_orig = [i for i in value_cols]
ndarray_cols, value_cols = list(ndarray_cols), list(value_cols)
aggregate_cols, sample_cols = list(aggregate_cols), list(sample_cols)
if add_count_col is not False:
if add_count_col is True:
add_count_col = "count"
value_cols.append(add_count_col)
def agg_one(dfs, aggregate):
"""takes a list of DataFrames and old aggregate
performs groupby and aggregation and returns new aggregate"""
if add_count_col is not False:
for i in dfs:
i[add_count_col] = 1
df = pd.concat(dfs + ([aggregate] if aggregate is not None else []), sort=False)
aggregate = ndarray_groupby_aggregate(
df,
ndarray_cols=ndarray_cols,
aggregate_cols=aggregate_cols,
value_cols=value_cols,
sample_cols=sample_cols,
preset="sum",
)
return aggregate.reset_index()
aggregate = None
cur = []
count = 0
for i in in_stream:
cur.append(i)
count += len(i)
if count > chunksize:
aggregate = agg_one(cur, aggregate)
cur = []
count = 0
if len(cur) > 0:
aggregate = agg_one(cur, aggregate)
if divide_by_count is not False:
if divide_by_count is True:
divide_by_count = "count"
for i in ndarray_cols + value_cols_orig:
aggregate[i] = aggregate[i] / aggregate[divide_by_count]
return aggregate
|
a47a3f82444dc1ef7d5eb5f63d7dd77c862fc605
| 25,895 |
from typing import List
import numpy as np
from PIL import Image
def get_cropped_source_data(
stack_list: List[str], crop_origin: np.ndarray, crop_max: np.ndarray
) -> np.ndarray:
"""
Read data from the given image files in an image stack
:param List[str] stack_list: List of filenames representing images in a stack
:param np.ndarray crop_origin: Origin of region to crop, array of shape (3,)
:param np.ndarray crop_max: Max position of region to crop, array of shape (3,)
:return: Cropped source data as an array of shape (x,y,z)
"""
stack_files = stack_list[crop_origin[2] : crop_max[2]]
img_slices = []
for f in stack_files:
img = Image.open(f)
img_arr = np.array(img)
# crop from image
img_crop = img_arr[
crop_origin[0] : crop_max[0],
crop_origin[1] : crop_max[1],
]
img_slices.append(img_crop)
return np.stack(img_slices, axis=2)
|
40f2537417a99d070979ba206de7c6e91a313b02
| 25,896 |
def valid_random_four_channel_images() -> str:
"""
    Make a folder with 6 valid images that have 4 channels.
:return: path to the folder
"""
# use .png because that supports 4 channels
return make_folder_with_files('.png', file_type='image', resolution=(300, 300), n_files=6, channels=4)
|
6b39f46467b4ded5a773964255293a3b587d9b6d
| 25,897 |
def build_template(spec) -> Template:
"""Build a template from a specification.
The resulting template is an object that when called with a set of
bindings (as produced by a matcher from `build_matcher`), returns
an instance of the template with names substituted by their bound values.
This is a generic function. Support for additional template specifications
can be added with the `build_template.register(<type>, <handler>)` function.
See the documentation of `functools.singledispatch` for further information.
"""
return LiteralTemplate(spec)
|
ef44befe0a937b786a48b1e1ddf729f5c1327e3b
| 25,898 |
import numpy as np
def roll(y, z):
"""Estimate angular roll from gravitational acceleration.
Args:
y, z (float, int, array-like): y, and z acceleration
Returns:
(float, int, array-like): roll
"""
return np.arctan2(y, z) * 180/np.pi
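# Worked example (hypothetical usage): equal y and z acceleration gives a
# 45-degree roll, since arctan2(1, 1) = pi/4.
assert np.isclose(roll(1.0, 1.0), 45.0)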
|
ccb0bf948baf7fee9853f4b842139e8a964c25b6
| 25,901 |
import numpy as np
def check_valid_move(grid: np.ndarray, current_position: tuple, move: tuple) -> bool:
"""
Checking if move is valid for the current position in provided grid
:param grid: validated array of a grid
:param current_position: current position
:param move: move in tuple form
:return: True or False
"""
# getting coordinates for moved position
moved_position = tuple(np.add(current_position, move))
def compare_coordinates(a: tuple, b: tuple) -> bool:
"""
Helper function to compare coordinates
Checks if a is smaller than b
"""
return all(np.array(a) < np.array(b))
# checking if coordinates are inside the array (between (0,0) and (N,N))
if compare_coordinates((0, 0), moved_position) and compare_coordinates(moved_position, grid.shape):
# checking if the coordinates are not on the obstacle
if grid[moved_position] == 'x':
return False
else:
return True
else:
return False
|
60f58c618a01aad744e1ae7c6d425fc69db20686
| 25,904 |
def check_valid_game(season, game):
"""
Checks if gameid in season schedule.
:param season: int, season
:param game: int, game
:return: bool
"""
try:
get_game_status(season, game)
return True
except IndexError:
return False
|
5bc95a2dc397b933c1e5716eb5a7e79641b87968
| 25,905 |
def console(session_console):
"""Return a root console.
Be sure to use this fixture if the GUI needs to be initialized for a test.
"""
console = session_console
assert libtcodpy.console_flush() == 0
libtcodpy.console_set_default_foreground(console, libtcodpy.white)
libtcodpy.console_set_default_background(console, libtcodpy.black)
libtcodpy.console_set_background_flag(console, libtcodpy.BKGND_SET)
libtcodpy.console_set_alignment(console, libtcodpy.LEFT)
libtcodpy.console_clear(console)
return console
|
25c13c549a40c24f7abc90ecb7c303bbc791643b
| 25,906 |
from tensorflow.keras.layers import Conv2D, BatchNormalization  # assumption: tf.keras
def conv(out_channels, kernel_size, stride=2, padding=1, batch_norm=True):
"""Creates a convolutional layer, with optional batch normalization.
"""
layers = []
conv_layer = Conv2D(out_channels, kernel_size, strides = stride, padding = 'same', use_bias = False, data_format = "channels_first")
# bias is set to False, so the layers are not offset by any amount
# append conv layer
layers.append(conv_layer)
if batch_norm:
# append batchnorm layer
layers.append(BatchNormalization())
## rtype: List[conv_layer, batch_norm] or List[conv_layer]
return layers
|
fe568a8b3cd5092db6751677f1accd3d73e36e77
| 25,907 |
def t90_from_t68(t68):
"""
ITS-90 temperature from IPTS-68 temperature
This conversion should be applied to all in-situ
data collected between 1/1/1968 and 31/12/1989.
"""
return t68 / 1.00024
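# Worked example: an IPTS-68 reading of 20.000 degC maps to
# 20.000 / 1.00024 ~= 19.9952 degC on the ITS-90 scale.
assert abs(t90_from_t68(20.0) - 19.9952) < 1e-4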
|
a2d8c7ccc0797d47fa8f732bdb61c1ec1e15700e
| 25,908 |
from xml.dom import minidom
from xml.etree import ElementTree
def prettify(elem):
    """Return a pretty-printed XML string for the Element.
"""
rough_string = ElementTree.tostring(elem, "utf-8")
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
|
b7686ca0b6d6def2e86f465fa0eb5726fe29d2ab
| 25,909 |
def _pad(
s: str,
bs: int,
) -> str:
"""Pads a string so its length is a multiple of a specified block size.
:param s: The string that is to be padded
:type s: str
:param bs: The block size
:type bs: int
:returns: The initial string, padded to have a length that is a multiple of the specified block size
:rtype: str
"""
number_of_bytes_to_pad = bs - len(s) % bs
ascii_string = chr(number_of_bytes_to_pad)
padding_str = number_of_bytes_to_pad * ascii_string
return s + padding_str
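# Hypothetical usage: a 5-character string with an 8-byte block size gets
# three bytes of padding, each equal to chr(3) (PKCS#7-style padding).
assert _pad("hello", 8) == "hello\x03\x03\x03"
assert len(_pad("hello", 8)) % 8 == 0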
|
1da441d51c57da688ebcf46b7a30feb36cd007fe
| 25,911 |
import json
def get_json(headers) -> str:
"""Construct a str formatted like JSON"""
body: dict = {}
for key, value in headers.items():
body[key] = value
return json.dumps(body, indent=2)
|
8471f044ae986acd2173d5e9be26c110ee1b1976
| 25,912 |
from typing import Any
def is_jsonable_object(obj: Any) -> bool:
"""
Return `True` if ``obj`` is a jsonable object
"""
cls = obj if isinstance(obj, type) else type(obj)
return isinstance(getattr(cls, PHERES_ATTR, None), ObjectData)
|
f0492544c88efc46d135bc913bc7f8dd7a7f7462
| 25,914 |
def Dir(obj):
"""As the standard dir, but also listup fields of COM object
Create COM object with [win32com.client.gencache.EnsureDispatch]
for early-binding to get what methods and params are available.
"""
keys = dir(obj)
try:
## if hasattr(obj, '_prop_map_get_'):
## keys += obj._prop_map_get_.keys()
if hasattr(obj, '_dispobj_'):
keys += dir(obj._dispobj_)
finally:
return keys
|
8abc62fbe09e953fb171626a888838e21346ad9e
| 25,915 |
from django.http import JsonResponse
def image_classify(request):
""" Image classification """
if request.method == 'POST':
# Get upload image
img = request.FILES.get('img', None)
if img:
return JsonResponse(dict(name=img.name, size=img.size))
else:
return JsonResponse(dict(code=401, msg='Bad request'))
|
4f459f7a7afd90b1c6de7f174b4926d0d90b35cb
| 25,916 |
def calculate_performance_indicators_V1(df):
"""Compute indicators of performances from df of predictions and GT:
- MAE: absolute distance of predicted value to ground truth
- Accuracy: 1 if predicted value falls within class boundaries
Note: Predicted and ground truths coverage values are ratios between 0 and 1.
"""
# round to 3rd to avoid artefacts like 0.8999999 for 0.9 as key of dict
df[["vt_veg_b", "vt_veg_moy", "vt_veg_h"]] = (
df[["vt_veg_b", "vt_veg_moy", "vt_veg_h"]].astype(np.float).round(3)
)
# MAE errors
df["error_veg_b"] = (df["pred_veg_b"] - df["vt_veg_b"]).abs()
df["error_veg_moy"] = (df["pred_veg_moy"] - df["vt_veg_moy"]).abs()
df["error_veg_h"] = (df["pred_veg_h"] - df["vt_veg_h"]).abs()
df["error_veg_b_and_moy"] = df[["error_veg_b", "error_veg_moy"]].mean(axis=1)
df["error_all"] = df[["error_veg_b", "error_veg_moy", "error_veg_h"]].mean(axis=1)
# Accuracy
try:
df["acc_veg_b"] = df.apply(
lambda x: compute_accuracy(x.pred_veg_b, x.vt_veg_b), axis=1
)
df["acc_veg_moy"] = df.apply(
lambda x: compute_accuracy(x.pred_veg_moy, x.vt_veg_moy), axis=1
)
df["acc_veg_h"] = df.apply(
lambda x: compute_accuracy(x.pred_veg_h, x.vt_veg_h), axis=1
)
df["acc_veg_b_and_moy"] = df[["acc_veg_b", "acc_veg_moy"]].mean(axis=1)
df["acc_all"] = df[["acc_veg_b", "acc_veg_moy"]].mean(axis=1)
except KeyError:
logger.info(
"Cannot calculate class-based performance indicators due to continuous ground truths."
)
return df
|
f5c374ffb558eaf65a4c29894fbcb831162a451d
| 25,917 |
def PH2_Calc(KH2, tH2, Kr, I, qH2):
"""
Calculate PH2.
:param KH2: hydrogen valve constant [kmol.s^(-1).atm^(-1)]
:type KH2 : float
:param tH2: hydrogen time constant [s]
:type tH2 : float
:param Kr: modeling constant [kmol.s^(-1).A^(-1)]
:type Kr : float
:param I: cell load current [A]
:type I : float
:param qH2: molar flow of hydrogen [kmol.s^(-1)]
:type qH2 : float
:return: PH2 [atm] as float
"""
try:
result = ((1 / KH2) / (1 + tH2)) * (qH2 - 2 * Kr * I)
return result
except (TypeError, ZeroDivisionError):
print(
"[Error] PH2 Calculation Failed (KH2:%s, tH2:%s, Kr:%s, I:%s, qH2:%s)" %
(str(KH2), str(tH2), str(Kr), str(I), str(qH2)))
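# Worked numeric example with made-up parameter values (not from the source):
# ((1/8.5e-4)/(1+3.37)) * (5e-3 - 2*9.95e-6*100) ~= 0.81 atm.
assert abs(PH2_Calc(8.5e-4, 3.37, 9.95e-6, 100, 5e-3) - 0.81) < 0.01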
|
fe69353bfdde4f301439b89f9946782457d07645
| 25,918 |
import numpy as np
import scipy.ndimage
def log_transform(image):
"""Renormalize image intensities to log space
Returns a tuple of transformed image and a dictionary to be passed into
inverse_log_transform. The minimum and maximum from the dictionary
can be applied to an image by the inverse_log_transform to
convert it back to its former intensity values.
"""
orig_min, orig_max = scipy.ndimage.extrema(image)[:2]
#
# We add 1/2 bit noise to an 8 bit image to give the log a bottom
#
limage = image.copy()
noise_min = orig_min + (orig_max - orig_min) / 256.0 + np.finfo(image.dtype).eps
limage[limage < noise_min] = noise_min
d = {"noise_min": noise_min}
limage = np.log(limage)
log_min, log_max = scipy.ndimage.extrema(limage)[:2]
d["log_min"] = log_min
d["log_max"] = log_max
return stretch(limage), d
|
8e8d6779b313c7ff02e7aafa291e4d2abd687ac1
| 25,919 |
from bs4 import BeautifulSoup
from typing import Optional
def parse_cpu(website: BeautifulSoup, product_id: int) -> Optional[CPU]:
"""Parses the given Intel ARK website for a CPU."""
# thanks for making accessing so easy btw.
# a simple string used for identification of the CPU
raw = website.find(attrs={"data-key": "ProcessorNumber"})
if raw is None:
# too old CPU, got no processor ID, I have no other idea how I could
# identify it - just skip it
return None
model = raw.string.strip().casefold()
# just a number like 42 or 0 or... 8
raw = website.find(attrs={"data-key": "CoreCount"}).string
corecount = int(raw)
# a bit more complicated, could be "4.2 GHz" but also " 1337.42 MHz"
raw = website.find(attrs={"data-key": "ClockSpeed"}).string.strip().split()
value = float(raw[0])
unit = raw[1]
corespeed = human_readable_to_hertz(value, unit)
return CPU(
model,
product_id,
"intel",
corecount,
corespeed
)
|
1e7d068caba63947c39ce3a2391009986c5d6ad3
| 25,920 |
from typing import List
def create_result_dict(
begin_date: str,
end_date: str,
total_downloads: int,
downloads_per_country: List[dict],
multi_row_columns: dict,
single_row_columns: dict,
) -> dict:
"""Create one result dictionary with info on downloads for a specific eprint id in a given time period.
:param begin_date: The begin date of download period
:param end_date: The end date of download period
:param total_downloads: Total of downloads in that period
:param downloads_per_country: List of downloads per country
:param multi_row_columns: Dict of column names & values for columns that have values over multiple rows of an
eprintid
:param single_row_columns: Dict of column names & values for columns that have values only in the first row of an eprint id
:return: Results dictionary
"""
result = dict(
begin_date=begin_date,
end_date=end_date,
total_downloads=total_downloads,
downloads_per_country=downloads_per_country,
**multi_row_columns,
**single_row_columns,
)
# change empty strings to None so they don't show up in BigQuery table
for k, v in result.items():
result[k] = v if v != "" else None
return result
|
59ed6c40e98a8a68e1914f8f14b992b702851ccd
| 25,921 |
def getConcentricCell(cellNum, matNum, density, innerSurface, outerSurface, universe, comment):
"""Create a cell which has multiple components inside a cell."""
uCard = ''
if type(universe) is int:
uCard = 'u=' + str(universe)
listType = []
if type(innerSurface) == type(listType):
newInnerSurface = ''
i = 1
for surface in innerSurface:
if i % 5 == 0:
newInnerSurface += ' {}\n '.format(surface)
else:
newInnerSurface += ' {}'.format(surface)
i += 1
innerSurface = newInnerSurface
cellCard = "{} {} {} {} -{} {} imp:n=1 {}".format(cellNum, matNum, round(density, 5), innerSurface, outerSurface,
uCard, comment)
return cellCard
|
f0e8af3210774500eac0fde195896f3b85473e3f
| 25,922 |
import numpy as np
from sklearn.cluster import KMeans
from tqdm import tqdm
def weight_compression(weights, bits, axis=0, quantizer=None):
"""Creates an in, out table that maps weight values to their codebook values.
Based on the idea presented by https://arxiv.org/pdf/1911.02079.pdf
Arguments:
weights: Numpy array
bits: Number of bits to compress weights to. This will
results in 2**bits codebook values
axis: axis to apply quantization by
quantizer: quantizer function that will be applied to codebook values
Returns:
index_table: array of indices that maps to codebook values for all weights
codebook_table: array of codebook values
"""
assert bits <= 8
n = 2**bits
index_table = []
codebook_table = np.zeros((weights.shape[axis], n))
km_models = [None] * weights.shape[axis]
for i, w in tqdm(enumerate(np.split(weights, weights.shape[axis], axis))):
original_shape = w.shape
w = w.ravel()
km = KMeans(n)
km.fit(w.reshape(-1, 1))
if quantizer:
km.cluster_centers_ = quantizer(km.cluster_centers_).numpy()
km.cluster_centers_.sort(axis=0)
km_models[i] = km
codebook_table[i, :] = km.cluster_centers_.flatten()
preds = km.predict(w.reshape(-1, 1))
index_table.append(preds.reshape(original_shape))
index_table = np.concatenate(index_table, axis)
return index_table, codebook_table
|
f7fd3a1908c51a1781367bfd717d9db6f7740934
| 25,925 |
import multiprocessing
import numpy as np
def noncoherent_dedispersion(array, dm_grid, nu_max, d_nu, d_t, threads=1):
"""
Method that de-disperse dynamical spectra with range values of dispersion
measures and average them in frequency to obtain image in (t, DM)-plane.
:param array:
Numpy 2D array (#freq, #t) with dynamical spectra.
:param dm_grid:
Array-like of values of DM on which to de-disperse [cm^3/pc].
:param nu_max:
Maximum frequency [MHz].
:param d_nu:
Value of frequency step [MHz].
:param d_t:
Value of time step [s].
:param threads: (optional)
Number of threads used for parallelization with ``multiprocessing``
module. If ``1`` then it isn't used. (default: 1)
"""
n_nu, n_t = array.shape
nu = np.arange(n_nu, dtype=float)
nu = (nu_max - nu * d_nu)[::-1]
pool = None
if threads > 1:
pool = multiprocessing.Pool(threads, maxtasksperchild=1000)
if pool:
m = pool.map
else:
m = map
params = [(array, dm, nu, nu_max, d_t) for dm in dm_grid]
# Accumulator of de-dispersed frequency averaged frames
result = list(m(_de_disperse_by_value_freq_average, params))
result = np.array(result)
if pool:
# Close pool
pool.close()
pool.join()
return result
|
866e129e74ae121c093c70a67c811f6a0bf0d3bc
| 25,926 |
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
def ParseLabelTensorOrDict(labels):
"""Return a tensor to use for input labels to tensor_forest.
The incoming targets can be a dict where keys are the string names of the
columns, which we turn into a single 1-D tensor for classification or
2-D tensor for regression.
Converts sparse tensors to dense ones.
Args:
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A 2-D tensor for labels/outputs.
"""
if isinstance(labels, dict):
return math_ops.to_float(
array_ops.concat(
[
sparse_ops.sparse_tensor_to_dense(
labels[k], default_value=-1) if isinstance(
                    labels[k], sparse_tensor.SparseTensor) else labels[k]
for k in sorted(labels.keys())
],
1))
else:
if isinstance(labels, sparse_tensor.SparseTensor):
return math_ops.to_float(sparse_ops.sparse_tensor_to_dense(
labels, default_value=-1))
else:
return math_ops.to_float(labels)
|
d0f5dcd32fc04418caa9715be2779897703927cb
| 25,928 |
def showCallGraph(pyew, doprint=True, addr=None):
""" Show the callgraph of the whole program """
dot = CCallGraphGenerator(pyew)
buf = dot.generateDot()
if doprint:
showDotInXDot(buf)
return buf
|
936176e312652536dc4cea8eaf3da531ec519615
| 25,929 |
def make_template(center, data):
"""Make templated data."""
if isinstance(data, dict):
return {key: make_template(center, val) for key, val in data.items()}
if isinstance(data, list):
return [make_template(center, val) for val in data]
env = get_env(center)
return env.from_string(str(data))
|
54763209c2b65604c3c781bdbf7553198048757f
| 25,930 |
def get_sequana_adapters(type_, direction):
"""Return path to a list of adapters in FASTA format
    :param type_: PCRFree, Rubicon, Nextera
    :param direction: fwd, rev, revcomp
    :return: path to the adapter filename
"""
# search possible types
registered = _get_registered_adapters()
if type_ not in registered:
logger.error("This adapter type (%s) is not valid" % type_)
logger.error("choose one in %s types" % registered)
raise ValueError
directions = ["fwd", "rev", "revcomp"]
if direction not in directions:
logger.error("This kind of tag (%s) is not valid" % direction)
logger.error("choose one in %s " % directions)
raise ValueError
return sequana_data("adapters_%s_%s.fa" % (type_, direction))
|
a331f9f0839d1193b9deefb3dbbdc8e31f882843
| 25,931 |
def board_str(board):
"""
String representation of the board. Unicode character for the piece,
1 for threat zone and 0 for empty zone.
"""
mat = ''
for row in board:
for squ in row:
if squ > 1:
mat += '%s ' % chr(squ)
else:
mat += '. '
mat += '\n'
return mat
|
769d846c5b03c8b75145e3b81cab17ed7331fbbf
| 25,932 |
def build_pubmed_url(pubmed_id) -> str:
"""
Generates a Pubmed URL from a Pubmed ID
:param pubmed_id: Pubmed ID to concatenate to Pubmed URL
:return: Pubmed URL
"""
return "https://pubmed.ncbi.nlm.nih.gov/" + str(pubmed_id)
|
5794fbec75de0451547d6f0570bb89964026c394
| 25,933 |
def create_build_job_query(user, time_frame, local=False):
"""Create the query to get build jobs from graylog
Args:
user(str): Fed ID
time_frame(int): Graylog search period in hours
local(bool): If True also search string for local builds
Returns:
str: Query string for a graylog request to get build jobs
"""
query_str = 'application:dls-release.py AND (message:'
if local:
query_str += FIND_BUILD_STR[BUILD] + ' OR message:' \
+ FIND_BUILD_STR[LOCAL] + ")"
else:
query_str += FIND_BUILD_STR[BUILD] + ")"
if user != "all":
query_str += " AND username:" + user
return create_graylog_query(query_str, time_frame)
|
ada2dd0c40ef0de8221e03dbdf5c2410705ff2cf
| 25,934 |
def convert_to_json(payload_content):
"""Convert the OPC DA array data to JSON (Dict) and return the aggregated JSON data."""
try:
json_response = {}
for t in payload_content: # tuple in payload_content
temp = {}
key = t[0].replace(".", "-").replace("/", "_")
if len(t) == 4:
temp["value"] = t[1]
temp["quality"] = t[2]
temp["timestamp"] = t[3]
else:
temp["value"] = "Parameters cannot be read from server"
json_response.setdefault(key, []).append(temp)
return json_response
except Exception as err:
logger.error("Failed to convert the data to JSON: %s", str(err))
return {"error": "Failed to covert the data to JSON: {}".format(err)}
|
be7aa3a60c9d8ad48e5a48e09bb16e5d456f2cba
| 25,935 |
def unites(value=32767):
"""
Restock all resistance messages.
"""
invoker = spellbook.getInvoker()
value = min(value, 32767)
invoker.restockAllResistanceMessages(value)
return 'Restocked %d unites!' % value
|
6cb8e977216b0559c1de85c14ff95983b92a11a1
| 25,936 |
import tensorflow as tf
def gif_summary(name, tensor, max_outputs, fps, collections=None, family=None):
"""Outputs a `Summary` protocol buffer with gif animations.
Args:
name: Name of the summary.
tensor: A 5-D `uint8` `Tensor` of shape `[batch_size, time, height, width,
channels]` where `channels` is 1 or 3.
max_outputs: Max number of batch elements to generate gifs for.
fps: frames per second of the animation
collections: Optional list of tf.GraphKeys. The collections to add the
summary to. Defaults to [tf.GraphKeys.SUMMARIES]
family: Optional; if provided, used as the prefix of the summary tag name,
which controls the tab name used for display on Tensorboard.
Returns:
A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
tensor = tf.image.convert_image_dtype(tensor, dtype=tf.uint8, saturate=True)
# tensor = tf.convert_to_tensor(tensor)
if skip_summary():
return tf.constant("")
with summary_op_util.summary_scope(name, family, values=[tensor]) as (tag, scope):
val = tf.py_func(
py_gif_summary,
[tag, tensor, max_outputs, fps],
tf.string,
stateful=False,
name=scope)
summary_op_util.collect(val, collections, [tf.GraphKeys.SUMMARIES])
return val
|
c0c4fda8e988c6f5a3918ae45531692ef36588e4
| 25,937 |
import numpy as np
def fFargIm(k,phi, x):
"""Imaginary part of the argument for the integral in fF()
"""
theta=phi*x
return (1/np.sqrt(1-k*k*np.sin(theta)**2)).imag
|
ec858c9b81e881e6d91299546904670723300b82
| 25,938 |
import warnings
def deprecated(func):
"""Prints a warning for functions marked as deprecated"""
def new_func(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter
warnings.warn('Call to deprecated function "{}".'.format(func.__name__),
category=DeprecationWarning, stacklevel=2)
warnings.simplefilter('default', DeprecationWarning) # reset filter
return func(*args, **kwargs)
return new_func
|
f92e71f8662d71d3ed5a93f914cf3352282d940c
| 25,939 |
import jax.numpy as jnp
import numpy as np
import pandas as pd
from numpy.random import multivariate_normal  # assumption: numpy's sampler
def _simulate_dataset(
latent_states,
covs,
log_weights,
pardict,
labels,
dimensions,
n_obs,
update_info,
control_data,
observed_factor_data,
policies,
transition_info,
):
"""Simulate datasets generated by a latent factor model.
Args:
See simulate_data
Returns:
See simulate_data
"""
policies = policies if policies is not None else []
n_states = dimensions["n_latent_factors"]
n_periods = dimensions["n_periods"]
weights = np.exp(log_weights)[0]
loadings_df = pd.DataFrame(
data=pardict["loadings"],
index=update_info.index,
columns=labels["latent_factors"],
)
control_params_df = pd.DataFrame(
data=pardict["controls"], index=update_info.index, columns=labels["controls"]
)
meas_sds = pd.DataFrame(
data=pardict["meas_sds"].reshape(-1, 1), index=update_info.index
)
transition_params = pardict["transition"]
shock_sds = pardict["shock_sds"]
dist_args = []
for mixture in range(dimensions["n_mixtures"]):
args = {
"mean": latent_states[0][mixture],
"cov": covs[0][mixture].T @ covs[0][mixture],
}
dist_args.append(args)
latent_states = np.zeros((n_periods, n_obs, n_states))
latent_states[0] = generate_start_states(n_obs, dimensions, dist_args, weights)
for t in range(n_periods - 1):
# if there is a shock in period t, add it here
policies_t = [p for p in policies if p["period"] == t]
for policy in policies_t:
position = labels["latent_factors"].index(policy["factor"])
latent_states[t, :, position] += _get_shock(
mean=policy["effect_size"], sd=policy["standard_deviation"], size=n_obs
)
# get combined states and observed factors as jax array
to_concat = [latent_states[t], observed_factor_data[t]]
states = jnp.array(np.concatenate(to_concat, axis=-1))
# reshaping is just needed for transform sigma points
states = states.reshape(1, 1, *states.shape)
# extract trans coeffs for the period
trans_coeffs = {k: arr[t] for k, arr in transition_params.items()}
# get anchoring_scaling_factors for the period
anchoring_scaling_factors = pardict["anchoring_scaling_factors"][
jnp.array([t, t + 1])
]
# get anchoring constants for the period
anchoring_constants = pardict["anchoring_constants"][jnp.array([t, t + 1])]
# call transform_sigma_points and convert result to numpy
next_states = np.array(
transform_sigma_points(
sigma_points=states,
transition_info=transition_info,
trans_coeffs=trans_coeffs,
anchoring_scaling_factors=anchoring_scaling_factors,
anchoring_constants=anchoring_constants,
)
).reshape(n_obs, -1)
errors = multivariate_normal(
mean=np.zeros(n_states), cov=np.diag(shock_sds[t] ** 2), size=n_obs
)
next_states = next_states + errors
latent_states[t + 1] = next_states
observed_data_by_period = []
for t in range(n_periods):
meas = pd.DataFrame(
data=measurements_from_states(
latent_states[t],
control_data[t],
loadings_df.loc[t].to_numpy(),
control_params_df.loc[t].to_numpy(),
meas_sds.loc[t].to_numpy().flatten(),
),
columns=loadings_df.loc[t].index,
)
meas["period"] = t
observed_data_by_period.append(meas)
observed_data = pd.concat(observed_data_by_period, axis=0, sort=True)
observed_data["id"] = observed_data.index
observed_data.sort_values(["id", "period"], inplace=True)
latent_data_by_period = []
for t in range(n_periods):
lat = pd.DataFrame(data=latent_states[t], columns=labels["latent_factors"])
lat["period"] = t
latent_data_by_period.append(lat)
latent_data = pd.concat(latent_data_by_period, axis=0, sort=True)
latent_data["id"] = latent_data.index
latent_data.sort_values(["id", "period"], inplace=True)
return observed_data, latent_data
|
5f3c046ffea328e01580e607762311a96b9bf66d
| 25,940 |
def has_user_data(node: hou.Node, name: str) -> bool:
"""Check if a node has user data under the supplied name.
:param node: The node to check for user data on.
:param name: The user data name.
:return: Whether or not the node has user data of the given name.
"""
return _cpp_methods.hasUserData(node, name)
|
5953a24fa369f7d8c1aed7635d5fde3f30324c27
| 25,941 |
import numpy as np
def load_ptsrc_catalog(cat_name, freqs, freq0=1.e8, usecols=(10,12,77,-5), sort=False):
"""
Load point sources from the GLEAM catalog.
Parameters
----------
cat_name : str
        Filename of point source catalogue.
freqs : array_like
Array of frequencies to evaluate point source SEDs at (in Hz).
freq0 : float, optional
Reference frequency for power law spectra, in Hz. Default: 1e8.
usecols : tuple of int, optional
Which columns to extract the catalogue data from. Columns required (in
order) are (RA, Dec, flux, spectral_index). Assumes angles in degrees,
fluxes in Jy.
Default (for GLEAM catalogue): (10,12,77,-5).
sort : bool, optional
Whether to sort the sources by flux in the first frequency channel
(descending order). Default: False.
Returns
-------
ra_dec : array_like
RA and Dec of sources, in radians.
flux : array_like
Fluxes of point sources as a function of frequency, in Jy.
"""
bb = np.genfromtxt(cat_name, usecols=usecols)
# Get angular positions
ra_dec = np.deg2rad(bb[:,0:2])
# Calculate SEDs
flux = (freqs[:,np.newaxis]/freq0)**bb[:,3].T * bb[:,2].T
    # Sort by flux if requested
    if sort:
        idxs = np.argsort(flux[0,:])[::-1]
        flux = flux[:,idxs]
        ra_dec = ra_dec[idxs,:]
    return ra_dec, flux
|
54f830e9fef746cdabe8b29cc9a7481b67593476
| 25,942 |
from tensorflow.keras.models import Sequential  # assumption: tf.keras
from tensorflow.keras.layers import Lambda, Conv2D, MaxPooling2D, Dropout, Flatten, Dense
def get_model():
"""
Reference : http://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf
I have added dropout layers to reduce overfitting
"""
model = Sequential()
# Normalization and zero centering.
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape = IMG_SHAPE)) # 66x200
model.add(Conv2D(24, 5, activation = 'relu', )) # 64x196x24
model.add(MaxPooling2D(strides = (2, 2))) # 32x98x24
model.add(Dropout(0.3))
model.add(Conv2D(36, 5, activation = 'relu')) # 28x94x36
model.add(MaxPooling2D(strides = (2, 2))) # 14x47x36
model.add(Dropout(0.3))
model.add(Conv2D(48, 5, activation = 'relu')) # 10x43x48
model.add(MaxPooling2D(strides = (2, 2))) # 5x21x48
model.add(Dropout(0.3))
model.add(Conv2D(64, 3, activation = 'relu')) # 3x19x64
model.add(Conv2D(64, 3, activation = 'relu')) # 1x17x64
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(100, activation = 'relu'))
model.add(Dropout(0.3))
model.add(Dense(50, activation = 'relu'))
model.add(Dense(10, activation = 'relu'))
model.add(Dense(1))
model.summary()
return model
|
4591745f43719450f2da579b13ebd45a103bf76e
| 25,944 |
import numpy as np
def ser_iuwt_decomposition(in1, scale_count, scale_adjust, store_smoothed):
"""
This function calls the a trous algorithm code to decompose the input into its wavelet coefficients. This is
the isotropic undecimated wavelet transform implemented for a single CPU core.
INPUTS:
in1 (no default): Array on which the decomposition is to be performed.
scale_count (no default): Maximum scale to be considered.
scale_adjust (default=0): Adjustment to scale value if first scales are of no interest.
store_smoothed (default=False):Boolean specifier for whether the smoothed image is stored or not.
OUTPUTS:
detail_coeffs Array containing the detail coefficients.
C0 (optional): Array containing the smoothest version of the input.
"""
wavelet_filter = (1./16)*np.array([1,4,6,4,1]) # Filter-bank for use in the a trous algorithm.
# Initialises an empty array to store the coefficients.
detail_coeffs = np.empty([scale_count-scale_adjust, in1.shape[0], in1.shape[1]])
C0 = in1 # Sets the initial value to be the input array.
# The following loop, which iterates up to scale_adjust, applies the a trous algorithm to the scales which are
# considered insignificant. This is important as each set of wavelet coefficients depends on the last smoothed
# version of the input.
if scale_adjust>0:
for i in range(0, scale_adjust):
C0 = ser_a_trous(C0, wavelet_filter, i)
    # The meat of the algorithm - two sequential applications of the a trous followed by determination and storing of
# the detail coefficients. C0 is reassigned the value of C on each loop - C0 is always the smoothest version of the
# input image.
for i in range(scale_adjust,scale_count):
C = ser_a_trous(C0, wavelet_filter, i) # Approximation coefficients.
C1 = ser_a_trous(C, wavelet_filter, i) # Approximation coefficients.
detail_coeffs[i-scale_adjust,:,:] = C0 - C1 # Detail coefficients.
C0 = C
if store_smoothed:
return detail_coeffs, C0
else:
return detail_coeffs
|
3a0e22ef55b14dfce3eb706800439d55f97bcd19
| 25,945 |
def delete_ref(profile, ref):
"""Delete a ref.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
ref
The ref to fetch, e.g., ``heads/my-feature-branch``.
Returns
The response of the DELETE request.
"""
resource = "/refs/" + ref
return api.delete_request(profile, resource)
|
4009c3bb787914ed4130760cf5d3fcfad4032496
| 25,946 |
def mean_ratio_reversion_test(hd1, hd2, n=20, offset=20, hold_time=30, return_index=False):
"""
Tests over the time period offset:offset-hold_time to see if the price ratio of the
price pair reverts to the mean.
"""
#Get initial price ratio
init_pr = hd1.close[offset]/hd2.close[offset]
#Get mean for the pair
pr_mean = mean_price_ratio(hd1, hd2, n=n, offset=offset)
#Calculate coefficient to use to see if the price ratio switched sides of mean pr
coeff = 1 if init_pr > pr_mean else -1
    for i in range(offset, max(offset-hold_time, 0), -1):
        if coeff*(hd1.close[i]/hd2.close[i] - pr_mean) < 0:
if return_index:
return i
return 1
#The pair has not reverted to the mean
if return_index:
return i
return 0
|
248987fbbb718303b413fe1eb63384fba42edc07
| 25,947 |
def list_contents(path, root):
"""
@return list of relative paths rooted at "root"
"""
cmd = "svn ls %s/%s" % (root, path)
code, out = util.execute(cmd, return_out=True)
dirs = out.strip().split('\n')
return dirs
|
4fd02b8aeb362bdca1d37f4dc3d481b38f1d9059
| 25,948 |
def evaluate_voting():
"""Evaluates the Voting-Results in the instance_dict. Returns Array with the Player-ID(s)"""
poll_dict = {}
for ip in instance_dict.keys():
if 'poll' in instance_dict[ip]:
if instance_dict[ip]['poll'] in poll_dict:
old = poll_dict[instance_dict[ip]['poll']]
poll_dict.update({instance_dict[ip]['poll']: old + 1})
else:
poll_dict.update({instance_dict[ip]['poll']: 1})
# Count votes:
return max(poll_dict.keys(), key=lambda k: poll_dict[k])
|
4c86e453e24114ad00239a3ad97c23f9b25dd243
| 25,949 |
import torch
def mmd(x1, x2, sigmas):
"""the loss of maximum mean discrepancy."""
x1 = torch.reshape(x1, [x1.shape[0], -1])
x2 = torch.reshape(x2, [x2.shape[0], -1])
# print('x1x2shape:', x1.shape)
diff = torch.mean(gaussian_kernel(x1, x1, sigmas)) # mean_x1x1
diff -= 2 * torch.mean(gaussian_kernel(x1, x2, sigmas)) # mean_x1x2
diff += torch.mean(gaussian_kernel(x2, x2, sigmas)) # mean_x2x2
# print('diff:', diff, diff.shape)
return diff
|
1248de57d5aac658c54c6f67b4ba072f3dcd3978
| 25,950 |
def choose(n,r):
"""
number of combinations of n things taken r at a time (order unimportant)
"""
if (n < r):
return 0
if (n == r):
return 1
s = min(r, (n - r))
t = n
a = n-1
b = 2
while b <= s:
t = (t*a)//b
a -= 1
b += 1
return t
|
5852054f1a6381278039b0ec2184d0887e2b1d2b
| 25,953 |
def _bisearch(ucs, table):
"""
Auxiliary function for binary search in interval table.
:arg int ucs: Ordinal value of unicode character.
:arg list table: List of starting and ending ranges of ordinal values,
in form of ``[(start, end), ...]``.
:rtype: int
:returns: 1 if ordinal value ucs is found within lookup table, else 0.
"""
lbound = 0
ubound = len(table) - 1
if ucs < table[0][0] or ucs > table[ubound][1]:
return 0
while ubound >= lbound:
mid = (lbound + ubound) // 2
if ucs > table[mid][1]:
lbound = mid + 1
elif ucs < table[mid][0]:
ubound = mid - 1
else:
return 1
return 0
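# Hypothetical usage with a tiny interval table: 0x0301 falls inside the
# first range, while 0x0041 ('A') lies outside both.
_table = [(0x0300, 0x036F), (0x0483, 0x0489)]
assert _bisearch(0x0301, _table) == 1
assert _bisearch(0x0041, _table) == 0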
|
f9b985771fa94138ae9b0dfbb8fa9ee413c65a48
| 25,954 |
def get_xml_serial_number (root):
"""
Get the serial number from the system global settings XML.
Parameters:
root -- An XML element to the root of the system global settings.
Return:
The serial number.
"""
return get_xml_string_value (root, "serialNumber", "serial number")
|
5b1dd2ef70f34980cddce442a1c8707c5d81e478
| 25,955 |
def ParseMachineType(resource_parser, machine_type_name, project, location,
scope):
"""Returns the location-specific machine type uri."""
if scope == compute_scopes.ScopeEnum.ZONE:
collection = 'compute.machineTypes'
params = {'project': project, 'zone': location}
elif scope == compute_scopes.ScopeEnum.REGION:
collection = 'compute.regionMachineTypes'
params = {'project': project, 'region': location}
machine_type_uri = resource_parser.Parse(
machine_type_name, collection=collection, params=params).SelfLink()
return machine_type_uri
|
70b5311525569a4981fd0170a62a3f0d53a8a8f1
| 25,956 |
from flask import redirect, url_for  # assumption: Flask helpers used below
def redirect_to_default():
"""
Redirects users to main page if they make a GET request to /generate
Generate should only be POSTed to
"""
log("Received GET request for /generate, returning to default page")
return redirect(url_for("default"))
|
951e610ac3e56ec6e84dbeacb3174fe79a1c6f9c
| 25,957 |
def client_credential_grant_session():
"""Create a Session from Client Credential Grant."""
oauth2credential = OAuth2Credential(
client_id=None,
redirect_url=None,
access_token=ACCESS_TOKEN,
expires_in_seconds=EXPIRES_IN_SECONDS,
scopes=SCOPES_SET,
grant_type=auth.CLIENT_CREDENTIAL_GRANT,
client_secret=None,
refresh_token=None,
)
return Session(oauth2credential=oauth2credential)
|
54b37e3a6ae582982e47e2135058ce6d7bafd6ea
| 25,958 |
import requests
def _query(server, method, parameters, timeout=DEFAULT_TIMEOUT, verify_ssl=True, proxies=None):
"""Formats and performs the query against the API.
:param server: The MyGeotab server.
:type server: str
:param method: The method name.
:type method: str
:param parameters: The parameters to send with the query.
:type parameters: dict
:param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes).
:type timeout: float
:param verify_ssl: If True, verify the SSL certificate. It's recommended not to modify this.
:type verify_ssl: bool
:param proxies: The proxies dictionary to apply to the request.
:type proxies: dict or None
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
:raise TimeoutException: Raises when the request does not respond after some time.
:raise urllib2.HTTPError: Raises when there is an HTTP status code that indicates failure.
:return: The JSON-decoded result from the server.
"""
api_endpoint = get_api_url(server)
params = dict(id=-1, method=method, params=parameters or {})
headers = get_headers()
with requests.Session() as session:
session.mount("https://", GeotabHTTPAdapter())
try:
response = session.post(
api_endpoint,
data=json_serialize(params),
headers=headers,
allow_redirects=True,
timeout=timeout,
verify=verify_ssl,
proxies=proxies,
)
except Timeout:
raise TimeoutException(server)
response.raise_for_status()
content_type = response.headers.get("Content-Type")
if content_type and "application/json" not in content_type.lower():
return response.text
return _process(json_deserialize(response.text))
|
a7615543dffc7270fddc12bc909488cdd03ad0be
| 25,959 |
def volume_update(context, volume_id, values):
"""Set the given properties on an volume and update it.
Raises NotFound if volume does not exist.
"""
return IMPL.volume_update(context, volume_id, values)
|
1f696458654daf25767ec2a888a2bfa7a8c1872d
| 25,960 |
def get_E_Elc_microwave_d_t(P_Elc_microwave_cook_rtd, t_microwave_cook_d_t):
"""時刻別消費電力量を計算する
Parameters
----------
P_Elc_microwave_cook_rtd : float
調理時の定格待機電力, W
t_microwave_cook_d_t : ndarray(N-dimensional array)
1年間の全時間の調理時間を格納したND配列, h
d日t時の調理時間が年開始時から8760個連続して格納されている
Returns
----------
E_Elc_microwave_d_t : ndarray(N-dimensional array)
1年間の全時間の消費電力量を格納したND配列, Wh
d日t時の消費電力量が年開始時から8760個連続して格納されている
"""
P_Elc_microwave_cook = get_P_Elc_microwave_cook(P_Elc_microwave_cook_rtd)
E_Elc_microwave_d_t = P_Elc_microwave_cook * t_microwave_cook_d_t
E_Elc_microwave_d_t = E_Elc_microwave_d_t * 10**(-3)
return E_Elc_microwave_d_t
|
c49666272e86c8e10b8df15e639056ba8701f88b
| 25,961 |
from typing import Tuple
from typing import Any
def set_up_text_location(
image: ImageDraw, img_opened: Image, text: str, font: ImageFont
) -> Tuple[Any, Any]:
"""
Returns coordinates of text location on the image
:param image: ImageDraw object
:param img_opened: opened PIL image
:param text: text
:param font: ImageFont
:return: Tuple[Any, Any]
"""
width_text, height_text = image.textsize(text, font)
width = (img_opened.size[0] - width_text) * WIDTH_PROPORTION
height = (img_opened.size[1] - height_text) * HEIGHT_PROPORTION
return width, height
|
26f8a7719e033cd81db66e50ecd638abd67a6846
| 25,962 |
def service_detail(request, service_id):
"""This view shows the details of a service"""
service = get_object_or_404(Service, pk=service_id)
job_statuses = (
enumerations.QUEUED,
enumerations.IN_PROCESS,
enumerations.FAILED,
)
resources_being_harvested = HarvestJob.objects.filter(
service=service, status__in=job_statuses)
already_imported_layers = Layer.objects.filter(remote_service=service)
service_list = service.service_set.all()
all_resources = (list(resources_being_harvested) +
list(already_imported_layers) + list(service_list))
paginator = Paginator(
all_resources,
getattr(settings, "CLIENT_RESULTS_LIMIT", 25),
orphans=3
)
page = request.GET.get("page")
try:
resources = paginator.page(page)
except PageNotAnInteger:
resources = paginator.page(1)
except EmptyPage:
resources = paginator.page(paginator.num_pages)
# pop the handler out of the session in order to free resources
# - we had stored the service handler on the session in order to
# speed up the register/harvest resources flow. However, for services
# with many resources, keeping the handler in the session leads to degraded
# performance
try:
request.session.pop(service.base_url)
except KeyError:
pass
return render(
request,
template_name="services/service_detail.html",
context={
"service": service,
"layers": (r for r in resources if isinstance(r, Layer)),
"services": (r for r in resources if isinstance(r, Service)),
"resource_jobs": (
r for r in resources if isinstance(r, HarvestJob)),
"permissions_json": _perms_info_json(service),
"resources": resources,
"total_resources": len(all_resources),
}
)
|
13abb413973428eda57c0c18e9ee71044f9e32ab
| 25,963 |
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, Add  # assumption: tf.keras
from tensorflow.keras.initializers import glorot_uniform
def convolutional_block(X, f, filters, stage, block, s):
"""
Implementation of the convolutional block as defined in Figure 4
Arguments:
X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
f -- integer, specifying the shape of the middle CONV's window for the main path
filters -- python list of integers, defining the number of filters in the CONV layers of the main path
stage -- integer, used to name the layers, depending on their position in the network
block -- string/character, used to name the layers, depending on their position in the network
s -- Integer, specifying the stride to be used
Returns:
X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)
"""
# Retrieve Filters
F1, F2, F3 = filters
# Save the input value
X_shortcut = X
##### MAIN PATH #####
# First component of main path
X = Conv2D(F1, (1, 1), strides = (s,s), kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3)(X)
X = Activation('relu')(X)
# Second component of main path (≈3 lines)
X = Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3)(X)
X = Activation('relu')(X)
# Third component of main path (≈2 lines)
X = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1,1), padding = 'valid', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3)(X)
##### SHORTCUT PATH #### (≈2 lines)
X_shortcut = Conv2D(filters = F3, kernel_size = (1, 1), strides = (s,s), padding = 'valid', kernel_initializer = glorot_uniform(seed=0))(X_shortcut)
X_shortcut = BatchNormalization(axis = 3)(X_shortcut)
# Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)
X = Add()([X, X_shortcut])
X = Activation('relu')(X)
return X
|
ab695bd12b7179c02f2cc8696eeeedf3f153bfce
| 25,964 |
def process_input(values, puzzle_input, u_input):
"""Takes input from the user and records them in the location specified
by the "value", and returns the resulting puzzle input
"""
puzzle_input[values[0]] = u_input
return puzzle_input
|
300c0850afa977d738f5a5ebbcb1b36ccd557b23
| 25,965 |
import json
def init_store():
"""
Function to initialize the store listener.
Parameters
----------
Returns
-------
dict
request response
"""
challenge_id = str(request.form.get('challenge_id'))
flag = str(request.form.get('flag'))
user_email = str(request.form.get('user_email'))
user_id = get_user_id(user_email)
user_ip = request.remote_addr
log_received_flag(user_email, user_ip, flag, challenge_id)
if not user_id:
return {"success": False, "message": "User does not exist.", "uploaded": False}
flags_list = IndividualFlag.query.filter_by(user_id=user_id).all()
for current_flag in flags_list:
if current_flag and Flags.query.filter_by(id=current_flag.id).first().challenge_id \
== int(challenge_id):
return {"success": False, "message": "Flag was already uploaded", "uploaded": True}
req = json.loads("{}")
req["challenge_id"] = challenge_id
req["content"] = flag
req["data"] = "Case Sensitive"
req["type"] = "individual"
req["user_id"] = user_id
try:
# fill IndividualFlags and Flags table
FlagModel = IndividualFlag
flag_model = FlagModel(**req)
db.session.add(flag_model)
db.session.commit()
db.session.close()
except Exception as err:
return {"success": False, "message": "Database Error :" + str(err), "uploaded": False}
return {"success": True, "Flag_data": req, "uploaded": True}
|
c7c3c6d9f3b645edfdcf82a4fd9cdbf1abd62b01
| 25,966 |
def PyLong_AsSsize_t(space, w_long):
"""Return a C Py_ssize_t representation of the contents of pylong. If
pylong is greater than PY_SSIZE_T_MAX, an OverflowError is raised
and -1 will be returned.
"""
return space.int_w(w_long)
|
c0d64c9be4333a0d40133478bcc5fcad221d0efc
| 25,967 |
import requests
def getssa_handler() -> Response:
"""Software Statement Assertion retrieval"""
if request.method == 'POST':
try:
r = requests.get(
'{}/tpp/{}/ssa/{}'.format(
cache.get('tpp_ssa_url'),
cache.get('tpp_id'),
cache.get('software_statement_id')
),
headers=dict(
Authorization='Bearer {}'.format(
cache.get('access_token')
)
)
)
except Exception as e:
app.logger.error('Could not retrieve the SSA because: {}'.format(e))
else:
if r.status_code == 200:
cache.set('software_statement_assertion', r.text, timeout=CACHE_TIMEOUT)
else:
app.logger.error('Could not retrieve the SSA, because: {}, {}'.format(r.status_code, r.reason))
context = dict(settings=get_context())
try:
return render_template('getssa.html', context=context)
except TemplateNotFound:
abort(404)
|
ddf9c7538c12073906caf934eba07995dd6ab590
| 25,970 |
import copy  # note: the copy()/zerofloat() calls below appear to be Mines JTK (Jython) array helpers, not this stdlib module
def dataTeapotShallow():
"""
Values set interactively by Dave Hale. Omit deeper samples.
"""
txf = [
30, 69,0.50, 99, 72,0.50,
63, 71,0.90, 128, 72,0.90,
29,172,0.35, 97,173,0.35,
63,173,0.75, 127,174,0.75,
33,272,0.20, 103,270,0.20,
70,271,0.60, 134,268,0.60]
n = len(txf)/3
t = zerofloat(n)
x = zerofloat(n)
f = zerofloat(n)
copy(n,0,3,txf,0,1,t)
copy(n,1,3,txf,0,1,x)
copy(n,2,3,txf,0,1,f)
#t = add(0.5,mul(0.004,t))
#x = add(0.0,mul(0.025,x))
return t,x,f
|
ef03e417424d975b24e258741c57db319f918ac1
| 25,971 |
from concurrent import futures
from pathlib import Path
import numpy as np
def count_labels(l_path, selected_class, num_workers=4):
"""Calculate anchor size.
Args:
l_path: path to labels.
selected_class: class to be calculated.
Returns:
(w, l, h)
"""
def count_single_label(label_file):
size = []
z_axis = []
num = 0
with open(label_file,"r") as f:
label_lines = f.readlines()
for label_line in label_lines:
label_line = label_line.split(" ")
if label_line[0] == selected_class:
num += 1
size.append([float(label_line[8]), float(label_line[9]), float(label_line[10])])
z_axis.append(float(label_line[13]))
np_size = np.array(size)
np_z_axis = np.array(z_axis)
if np_size.shape[0] == 0:
return 0,0,0,0,0
s_h = np_size[:, 0].sum()
s_w = np_size[:, 1].sum()
s_l = np_size[:, 2].sum()
s_z = np_z_axis.sum()
return s_h, s_w, s_l, s_z, num
label_list = list(Path(l_path).glob("**/*.txt"))
sum_h = 0
sum_w = 0
sum_l = 0
sum_z = 0
total_num = 0
with futures.ThreadPoolExecutor(num_workers) as executor:
for result in executor.map(count_single_label, label_list):
sum_h += result[0]
sum_w += result[1]
sum_l += result[2]
sum_z += result[3]
total_num += result[4]
avg_h = sum_h / total_num
avg_w = sum_w / total_num
avg_l = sum_l / total_num
avg_z = sum_z / total_num
print("the mean height of %s" % selected_class, avg_h)
print("the mean width of %s" % selected_class, avg_w)
print("the mean length of %s" % selected_class, avg_l)
print("the mean z coordinate of %s" % selected_class, avg_z)
return [round(avg_w,2), round(avg_l,2), round(avg_h,2)]
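# Hedged usage sketch: the label directory and class name below are hypothetical
# KITTI-style inputs; the call averages box sizes over all .txt label files.
# anchor_wlh = count_labels("/data/kitti/training/label_2", "Car", num_workers=4)
# # -> [mean_width, mean_length, mean_height], each rounded to two decimals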
|
e027ace155e6b7ca83dc98b7b0191109ca12420d
| 25,972 |
def sorted_chromosome(all_samples):
"""
sorted_chromosome(AllSamples) -> list
:return: list of chromosome found in all samples
"""
sorted_chromosome_list = sorted(all_samples.chr_list.keys())
print(sorted_chromosome_list)
return sorted_chromosome_list
|
c1e49ac974e16c7f9b69581442186c3efc23ef70
| 25,973 |
def adjust_bb_size(bounding_box, factor, resample=False):
"""Modifies the bounding box dimensions according to a given factor.
Args:
bounding_box (list or tuple): Coordinates of bounding box (x_min, x_max, y_min, y_max, z_min, z_max).
factor (list or tuple): Multiplicative factor for each dimension (list or tuple of length 3).
resample (bool): Boolean indicating if this resize is for resampling.
Returns:
list: New coordinates (x_min, x_max, y_min, y_max, z_min, z_max).
"""
coord = []
for i in range(len(bounding_box) // 2):
d_min, d_max = bounding_box[2 * i: (2 * i) + 2]
if resample:
d_min, d_max = d_min * factor[i], d_max * factor[i]
dim_len = d_max - d_min
else:
dim_len = (d_max - d_min) * factor[i]
# new min and max coordinates
min_coord = d_min - (dim_len - (d_max - d_min)) // 2
coord.append(int(round(max(min_coord, 0))))
coord.append(int(coord[-1] + dim_len))
return coord
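# Worked example (hypothetical coordinates): doubling the x-extent of a box
# while leaving the other axes unchanged; the box stays centred on the same point.
# adjust_bb_size((10, 20, 30, 60, 0, 10), factor=(2, 1, 1))
# # -> [5, 25, 30, 60, 0, 10]  (x span grows from 10 to 20)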
|
93a3c5947cb7c3335421084092dbae8840f8164b
| 25,974 |
import numpy as np
def round_dt64(t, dt, t0=np.datetime64("1970-01-01 00:00:00")):
"""
Round the given t to the nearest integer number of dt
from a reference time (defaults to unix epoch)
"""
return clamp_dt64_helper(np.round,t,dt,t0)
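# Hedged usage sketch, assuming clamp_dt64_helper(fn, t, dt, t0) applies fn to
# (t - t0) / dt and maps the result back to a datetime64 (helper not shown here):
# round_dt64(np.datetime64("2023-01-01 00:07"), np.timedelta64(15, "m"))
# # -> numpy.datetime64('2023-01-01T00:00') under that assumption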
|
5e3695993110f875cc62402a770c9cb56ff9e544
| 25,975 |
def distill_base():
"""Set of hyperparameters."""
# Base
hparams = common_hparams.basic_params1()
# teacher/student parameters
hparams.add_hparam("teacher_model", "")
hparams.add_hparam("teacher_hparams", "")
hparams.add_hparam("student_model", "")
hparams.add_hparam("student_hparams", "")
# Distillation parameters
# WARNING: distill_phase hparam will be overwritten in /bin/t2t_distill.py
hparams.add_hparam("distill_phase", None)
hparams.add_hparam("task_balance", 1.0)
hparams.add_hparam("distill_temperature", 1.0)
hparams.add_hparam("num_classes", 10)
# Optional Phase-specific hyperparameters
hparams.add_hparam("teacher_learning_rate", None)
hparams.add_hparam("student_learning_rate", None)
# Training parameters (stolen from ResNet)
hparams.batch_size = 128
hparams.optimizer = "Momentum"
hparams.optimizer_momentum_momentum = 0.9
hparams.optimizer_momentum_nesterov = True
hparams.weight_decay = 1e-4
hparams.clip_grad_norm = 0.0
# (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.)
hparams.learning_rate = 0.4
hparams.learning_rate_decay_scheme = "cosine"
# For image_imagenet224, 120k training steps, which effectively makes this a
# cosine decay (i.e. no cycles).
hparams.learning_rate_cosine_cycle_steps = 120000
hparams.initializer = "normal_unit_scaling"
hparams.initializer_gain = 2.
return hparams
|
7bbf85971e0323282a801c7db4e646f7b745d453
| 25,976 |
def authorize(*args, **kwargs):
"""Handle for authorization of login information."""
next_url = url_for('oauth.authorize', **{
'response_type': request.args.get('response_type'),
'client_id': request.args.get('client_id'),
'redirect_uri': request.args.get('redirect_uri'),
'scope': request.args.get('scope')
})
if not hasattr(current_user, 'id'):
return redirect(url_for('security.login', next=next_url))
"""
Assign the current_user object to a variable so that we don't
accidently alter the object during this process.
"""
this_user = current_user
if request.method == 'GET':
client_id = kwargs.get('client_id')
client = Client.query.filter_by(client_id=client_id).first()
kwargs['client'] = client
kwargs['user'] = this_user
return render_template('oauth/authorize.html', **kwargs)
confirm = request.form.get('confirm', 'no')
return confirm == 'yes'
|
e38b958567b9fa7427c2b3dd7b04cabadbbf4a8c
| 25,977 |
import numpy as np
import pandas as pd
def summary_stats(r, riskfree_rate=0.027):
"""
Return a DataFrame that contains aggregated summary stats for the returns in the columns of r
"""
ann_r = np.round(r.aggregate(annualize_rets, periods_per_year=4), 2)
ann_vol = np.round(r.aggregate(annualize_vol, periods_per_year=4), 2)
ann_sr = np.round(r.aggregate(sharpe_ratio, riskfree_rate=riskfree_rate, periods_per_year=4),2 )
dd = np.round(r.aggregate(lambda r: drawdown(r).Drawdown.min()), 2)
skew = np.round(r.aggregate(skewness), 2)
kurt = np.round(r.aggregate(kurtosis), 2)
cf_var5 = np.round(r.aggregate(var_gaussian, modified=True), 2)
hist_cvar5 = np.round(r.aggregate(cvar_historic), 2)
return pd.DataFrame({
"Annualized Return": ann_r,
"Annualized Vol": ann_vol,
"Skewness": skew,
"Kurtosis": kurt,
"Cornish-Fisher VaR (5%)": cf_var5,
"Historic CVaR (5%)": hist_cvar5,
"Sharpe Ratio": ann_sr,
"Max Drawdown": dd
})
|
230534298d907d0bff9dd843d14e3a6c5481ddbf
| 25,978 |
def mid_longitude(geom: Geometry) -> float:
"""Return longitude of the middle point of the geomtry."""
((lon,), _) = geom.centroid.to_crs("epsg:4326").xy
return lon
|
ada60c2b72fc36af7d37cd3e063d0154484727d0
| 25,979 |
def unique_count_weight(feature):
"""Normalize count number of unique values relative to length of feature.
Args:
feature: feature/column of pandas dataset
Returns:
Normalized Number of unique values relative to length of feature.
"""
return len(feature.value_counts()) / len(feature)
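# Hedged usage sketch (assumes pandas is available as pd in the module):
# unique_count_weight(pd.Series(["a", "b", "a", "c"]))
# # -> 0.75  (3 unique values / 4 rows)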
|
0345208cd7d9d4bc2303db377206e5362db6cdde
| 25,980 |
def mode_glass(frame_bg, frame_fg, args_glass):
"""
    Description: In glass mode, the eyes are located and a pair of glasses is drawn over them.
Params:
frame_bg: The background layer.
frame_fg: The canvas layer.
args_glass: The arguments used in glass mode.
"""
frame_bg = wear_glasses.wear_glasses(
frame_bg, args_glass
)
return frame_bg, frame_fg
|
4f2957a1aed66383fe2f8ea394fb4fca0f2ba550
| 25,981 |
import pyrender
def line_meshes(verts, edges, colors=None, poses=None):
    """Create pyrender Mesh instance for lines.
    Args:
      verts: np.array floats of shape [#v, 3]
      edges: np.array ints of shape [#e, 2] (vertex index pairs, one per line segment)
colors: np.array floats of shape [#v, 3]
poses: poses : (x,4,4)
Array of 4x4 transformation matrices for instancing this object.
"""
prim = pyrender.primitive.Primitive(
positions=verts,
indices=edges,
color_0=colors,
mode=pyrender.constants.GLTF.LINES,
poses=poses)
return pyrender.mesh.Mesh(primitives=[prim], is_visible=True)
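# Hedged usage sketch (assumes numpy as np): wireframe outline of a unit square.
# verts = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]], dtype=np.float32)
# edges = np.array([[0, 1], [1, 2], [2, 3], [3, 0]], dtype=np.int32)
# square_outline = line_meshes(verts, edges)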
|
22b5fa7aaa475e56bcb120427437881f4fa28209
| 25,982 |
import math
def isPrime(n):
"""
check if the input number n is a prime number or not
"""
if n <= 3:
return n > 1
if n % 6 != 1 and n % 6 != 5:
return False
sqrt = math.sqrt(n)
for i in range(5, int(sqrt)+1, 6):
if n % i == 0 or n % (i+2) == 0:
return False
return True
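# Quick illustrative check of the 6k±1 trial division above:
# [n for n in range(2, 20) if isPrime(n)]
# # -> [2, 3, 5, 7, 11, 13, 17, 19]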
|
91da5b13840181d039902e2db3efb8cc09609465
| 25,983 |
import numpy as np
def seqprob_forward(alpha):
"""
Total probability of observing the whole sequence using the forward algorithm
Inputs:
- alpha: A numpy array alpha[j, t] = P(Z_t = s_j, x_1:x_t)
Returns:
- prob: A float number of P(x_1:x_T)
"""
prob = 0
###################################################
prob = np.sum(alpha[:, -1])
###################################################
return prob
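# Hedged usage sketch: alpha for 2 hidden states over 3 time steps (made-up values);
# the sequence probability is the sum over states at the final time step.
# alpha = np.array([[0.10, 0.05, 0.02],
#                   [0.20, 0.08, 0.03]])
# seqprob_forward(alpha)
# # -> 0.05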
|
edc24df276db1e4d0ffcdd51231036f1dc1eadaa
| 25,984 |
import numpy as np
def center_filter(gt_boxes, rect):
    """
    Filter out boxes whose centre point does not lie inside the rectangle.
    :param gt_boxes: [N,(y1,x1,y2,x2)]
    :param rect: [y1,x1,y2,x2]
    :return keep: boolean mask of the boxes whose centre lies inside the rectangle
    """
    # centre coordinates of the gt boxes
    ctr_x = np.sum(gt_boxes[:, [1, 3]], axis=1) / 2.  # [N]
    ctr_y = np.sum(gt_boxes[:, [0, 2]], axis=1) / 2.  # [N]
    y1, x1, y2, x2 = rect  # rectangle coordinates
keep = np.logical_and(np.logical_and(np.logical_and(ctr_x >= x1 + 1, ctr_x <= x2 - 1),
ctr_y >= y1 + 1),
ctr_y <= y2 - 1)
return keep
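# Worked example (hypothetical boxes, [y1, x1, y2, x2] layout): only the first
# box has its centre strictly inside the 10x10 rectangle (with a 1-pixel margin).
# gt = np.array([[2, 2, 6, 6], [8, 8, 14, 14]])
# center_filter(gt, [0, 0, 10, 10])
# # -> array([ True, False])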
|
3a9d946789b1ee2de732f6cc29970d0fdf051e1f
| 25,985 |
import pandas as pd
import spacy
def named_entities(s, package="spacy"):
"""
Return named-entities.
Use Spacy named-entity-recognition.
PERSON: People, including fictional.
NORP: Nationalities or religious or political groups.
FAC: Buildings, airports, highways, bridges, etc.
ORG: Companies, agencies, institutions, etc.
GPE: Countries, cities, states.
LOC: Non-GPE locations, mountain ranges, bodies of water.
PRODUCT: Objects, vehicles, foods, etc. (Not services.)
EVENT: Named hurricanes, battles, wars, sports events, etc.
WORK_OF_ART: Titles of books, songs, etc.
LAW: Named documents made into laws.
LANGUAGE: Any named language.
DATE: Absolute or relative dates or periods.
TIME: Times smaller than a day.
PERCENT: Percentage, including ”%“.
MONEY: Monetary values, including unit.
QUANTITY: Measurements, as of weight or distance.
ORDINAL: “first”, “second”, etc.
CARDINAL: Numerals that do not fall under another type.
"""
entities = []
nlp = spacy.load('en_core_web_sm', disable=["tagger", "parser"])
    # with the tagger and parser disabled, the pipeline effectively runs only NER
for doc in nlp.pipe(s.astype("unicode").values, batch_size=32):
entities.append([(ent.text, ent.label_, ent.start_char, ent.end_char)
for ent in doc.ents])
return pd.Series(entities, index=s.index)
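# Hedged usage sketch (entity spans are illustrative, model output may differ):
# s = pd.Series(["Yesterday I was in NY with Bill de Blasio"])
# named_entities(s)[0]
# # -> e.g. [('Yesterday', 'DATE', 0, 9), ('NY', 'GPE', 19, 21),
# #          ('Bill de Blasio', 'PERSON', 27, 41)]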
|
239f7f4887ee72ac5f698b2b32098019ccde3965
| 25,986 |
def get_premium_client():
"""Get a connection to the premium Minio tenant"""
return __get_minio_client__("premium")
|
f6c28453e7f04835c2dd2c91458712c7c80f3ded
| 25,987 |
def chunk_sample_text(path: str) -> list:
"""Function to chunk down a given vrt file into pieces sepparated by <> </> boundaries.
Assumes that there is one layer (no nested <> </> statements) of text elements to be separated."""
# list for data chunks
data = []
# index to refer to current chunk
i = 0
# index of seen xml elements
xml_seen = 0
with open(path, "r") as myfile:
# iterate .vrt
for line in myfile:
            # if line starts with "<" and xml seen == 0 we have the first chunk
if line.startswith("<") and xml_seen == 0:
# we have now seen an xml element
xml_seen += 1
# add chunk to list-> chunk is list of three strings:
# chunk[0]: Opening "<>" statement
# chunk[1]: Text contained in chunk, every "\n" replaced with " "
# chunk[2]: Next "<>" statement
data.append(["", "", ""])
data[i][0] += line.replace("\n", " ")
elif line.startswith("<") and xml_seen > 0:
# we've seen another one
xml_seen += 1
# if we encounter a closing statement we end the current chunk
if line.startswith("</"):
data[i][2] = line.replace("\n", " ")
i += 1
data.append(["", "", ""])
# else we encountered another opening xml element and are in a nested environment
# we also start a new chunk but leave the closing statement of the previous one empty
else:
i += 1
data.append(["", "", ""])
data[i][0] = line.replace("\n", " ")
# if we are not on a line with an xml element we can just write the text to the
# text entry (idx 1) for the current chunk, "inter-chunk indexing" should be handled
# by the above case selection
else:
# append line to chunk[1], replacing "\n" with " "
data[i][1] += line.replace("\n", " ")
    # if we appended empty chunks we remove them here
    # (build a new list rather than calling remove() while iterating, which skips elements)
    data = [chunk for chunk in data if not all(elem == "" for elem in chunk)]
return data
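# Hedged usage sketch for a hypothetical corpus.vrt whose first element is
# <text id="1"> ... </text>; each chunk is [opening_tag, joined_text, closing_tag]
# with newlines replaced by spaces:
# chunk_sample_text("corpus.vrt")[0]
# # -> ['<text id="1"> ', 'first token line second token line ', '</text> ']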
|
6e6c36db38383283bd6076f0b6b346dcfd608243
| 25,989 |
from typing import Dict
def describe_dag_diffs(x, y):
"""Returns a list of strings describing differences between x and y."""
diffs = []
# A pair of dictionaries mapping id(x_val) or id(y_val) to the first path at
# which that value was reached. These are used to check that the sharing
  # structure of `x` and `y` is the same. In particular, if x_val is in x_memo,
# then x_memo[id(x_val)] should be equal to y_memo[id(y_val)]. If not, then
# the sharing structure is different.
x_memo: Dict[int, daglish.Path] = {}
y_memo: Dict[int, daglish.Path] = {}
def values_diff_message(x_val, y_val, path):
"""A message indicating that `x_val` != `y_val` at `path`."""
path_str = daglish.path_str(path)
x_repr = repr(x_val)
y_repr = repr(y_val)
if len(x_repr) + len(y_repr) + len(path_str) < 70:
return f'* x{path_str}={x_repr} but y{path_str}={y_repr}'
else:
# For longer values, it's easier to spot differences if the two
# values are displayed on separate lines.
return f'* x{path_str}={x_repr} but\n y{path_str}={y_repr}'
def find_diffs(x_val, y_val, path):
"""Adds differences between `x_val` and `y_val` to `diffs`."""
# Compare the sharing structure of x_val and y_val.
shared_x_path = x_memo.get(id(x_val))
shared_y_path = y_memo.get(id(y_val))
if shared_x_path is not None and shared_x_path == shared_y_path:
return # We have already compared x_val with y_val.
if shared_x_path is None:
x_memo[id(x_val)] = path
else:
path_str = daglish.path_str(path)
x_path = daglish.path_str(shared_x_path)
diffs.append(f'* Sharing diff: x{path_str} is x{x_path} but '
f'y{path_str} is not y{x_path}')
if shared_y_path is None:
y_memo[id(y_val)] = path
else:
path_str = daglish.path_str(path)
y_path = daglish.path_str(shared_y_path)
diffs.append(f'* Sharing diff: y{path_str} is y{y_path} but '
f'x{path_str} is not x{y_path}')
# Compare x_val and y_val by type.
if type(x_val) is not type(y_val):
path_str = daglish.path_str(path)
diffs.append(f'* type(x{path_str}) != type(y{path_str}): '
f'{type(x_val)} vs {type(y_val)}')
      return  # Don't report any further differences between x_val and y_val.
# Compare x_val and y_val by value.
node_traverser = daglish.find_node_traverser(type(x_val))
if node_traverser is None:
if x_val != y_val:
diffs.append(values_diff_message(x_val, y_val, path))
else:
x_children, x_metadata = node_traverser.flatten(x_val)
y_children, y_metadata = node_traverser.flatten(y_val)
x_path_elements = node_traverser.path_elements(x_val)
y_path_elements = node_traverser.path_elements(y_val)
if x_path_elements != y_path_elements:
for path_elt in set(x_path_elements) - set(y_path_elements):
child_path = daglish.path_str(path + (path_elt,))
diffs.append(
f'* x{child_path} has a value but y{child_path} does not.')
for path_elt in set(y_path_elements) - set(x_path_elements):
child_path = daglish.path_str(path + (path_elt,))
diffs.append(
f'* y{child_path} has a value but x{child_path} does not.')
elif x_metadata != y_metadata:
diffs.append(values_diff_message(x_val, y_val, path))
else:
# Recursively check children. Note: we only recurse if type,
# path_elements, and metadata are all equal.
assert len(x_children) == len(y_children) == len(x_path_elements)
for x_child, y_child, path_elt in zip(x_children, y_children,
x_path_elements):
find_diffs(x_child, y_child, path + (path_elt,))
find_diffs(x, y, ())
return sorted(diffs)
|
4e3a767af9dd64b119111860a039a99b2b210a0f
| 25,990 |
def ec_cert(cert_dir):
"""Pass."""
return cert_dir / "eccert.pem"
|
71cf6d98b05e4fc80515936d7aedc6f184fbe0a6
| 25,991 |
from datetime import datetime
def get_expiries(body):
"""
:type body: BeautifulSoup
"""
_ex = body.find_all('select', {'id': 'date', 'name': 'date'})
ex = []
for ch in _ex:
for _e in ch:
try:
ex.append(datetime.strptime(_e.text, '%d%b%Y').date())
except ValueError:
pass
except AttributeError:
pass
return ex
|
09d7f067aa283ff930151b129378785dcbc17b09
| 25,992 |