content (stringlengths 35–762k) | sha1 (stringlengths 40–40) | id (int64 0–3.66M)
---|---|---|
from matplotlib.colors import colorConverter
def lighten(data, amt=0.10, is255=False):
"""Lighten a vector of colors by fraction `amt` of remaining possible intensity.
New colors are calculated as::
>>> new_colors = data + amt*(1.0 - data)
>>> new_colors[:, -1] = 1 # keep all alpha at 1.0
Parameters
----------
data : matplotlib colorspec or sequence of colorspecs
input color(s)
amt : float, optional
        Fraction by which to lighten `r`, `g`, and `b`. `a` remains unchanged
(Default: 0.10)
is255 : bool, optional
        If `True`, rgb values in `data` are assumed to be between 0 and 255
rather than 0.0 and 1.0. In this case, return values will also
be between 0 and 255.
Returns
-------
numpy.ndarray
Lightened version of data
"""
data = colorConverter.to_rgba_array(data)
new_colors = data + amt * (1.0 - data)
if is255:
new_colors = (255 * new_colors).round()
new_colors[:, -1] = data[:, -1]
return new_colors | 195faa21ba30989f900b9b7f2b655a97074d8833 | 13,731 |
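A minimal usage sketch for the function above (assumes matplotlib is installed; the input colors are arbitrary examples):

# Lighten a named color and an RGB tuple by 25% of the remaining intensity.
pale = lighten(['red', (0.2, 0.4, 0.6)], amt=0.25)
print(pale)  # -> a (2, 4) array of RGBA values in [0, 1]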
def _determine_function_name_type(node):
"""Determine the name type whose regex the a function's name should match.
:param node: A function node.
:returns: One of ('function', 'method', 'attr')
"""
if not node.is_method():
return 'function'
if node.decorators:
decorators = node.decorators.nodes
else:
decorators = []
for decorator in decorators:
# If the function is a property (decorated with @property
# or @abc.abstractproperty), the name type is 'attr'.
if (isinstance(decorator, astroid.Name) or
(isinstance(decorator, astroid.Getattr) and
decorator.attrname == 'abstractproperty')):
infered = safe_infer(decorator)
if infered and infered.qname() in PROPERTY_CLASSES:
return 'attr'
# If the function is decorated using the prop_method.{setter,getter}
# form, treat it like an attribute as well.
elif (isinstance(decorator, astroid.Getattr) and
decorator.attrname in ('setter', 'deleter')):
return 'attr'
return 'method' | d80cfd4aabdd79023d636c7425b04e747420ad36 | 13,732 |
from typing import Dict, List, Tuple, TypeVar

import datasets
import pandas as pd
import toolz as T  # assumption: `T` provides mapcat/valmap (e.g. the toolz library)

_T = TypeVar('_T')
def create_dataset(
template_path:
str = 'com_github_corypaik_coda/projects/coda/data/coda/templates.yaml',
objects_path:
str = 'com_github_corypaik_coda/projects/coda/data/coda/objects.jsonl',
annotations_path:
str = 'com_github_corypaik_coda/projects/coda/data/coda/annotations.jsonl',
seed_for_splits: int = 12345,
seed_for_kmeans: int = 0,
) -> Tuple[datasets.DatasetDict, pd.DataFrame]:
""" Prepares a dataset and saves it disk
Args:
metadata_path: File to save with metadata about each object.
output_dataset_dir: Directory to save the dataset to disk.
Returns:
ds: dataset containing all formatted examples (train, val, test splits)
meta: dataframe containing metadata about each object.
"""
# maybe convert paths
template_path = maybe_rlocation(template_path)
objects_path = maybe_rlocation(objects_path)
annotations_path = maybe_rlocation(annotations_path)
# process annotations
df = pd.read_json(annotations_path, orient='records', lines=True)
  # normalize the color annotation columns so each row sums to 1
df[COLORS] = df[COLORS].div(df[COLORS].sum(axis=1), 0)
df = df.set_index(['class_id', 'worker_id'], verify_integrity=True)
# apply a filter
df = df.groupby('class_id', as_index=False).apply(_filter_annotations)
df = df.reset_index()
# average annotations
df = df.groupby('class_id', as_index=False).mean()
# kmeans for groupings.
df = _get_object_groups(df, seed=seed_for_kmeans)
# add template data. this also drops a few objects that we have annotations
# for but are not included.
tdf = pd.read_json(objects_path, orient='records', lines=True)
df = df.merge(tdf, on='class_id', validate='one_to_one')
df = df.sort_values('class_id')
meta = df
templates = _load_templates(template_path=template_path)
# the real dataset: split groundtruth and filtered
# gives us a dict for each split containing a list of objects (example form)
split_objects = _generate_splits(df, seed=seed_for_splits)
def _process_split(x: List[Dict[str, _T]]) -> Dict[str, List[_T]]:
x = T.mapcat(_generate_examples_for_obj(templates=templates), x)
x = list(x)
x = {k: [el[k] for el in x] for k in x[0].keys()}
return x
# map each
data = T.valmap(_process_split, split_objects)
# metadata
features = datasets.Features({
'class_id':
datasets.Value('string'),
'display_name':
datasets.Value('string'),
'ngram':
datasets.Value('string'),
'label':
datasets.Sequence(datasets.Value('float')),
'object_group':
datasets.ClassLabel(names=('Single', 'Multi', 'Any')),
'text':
datasets.Value('string'),
'template_group':
datasets.ClassLabel(names=('clip-imagenet', 'text-masked')),
'template_idx':
datasets.Value('int32')
})
# create dataset
ds = datasets.DatasetDict(
**{
split: datasets.Dataset.from_dict(
mapping=mapping,
features=features,
split=split,
) for split, mapping in data.items()
})
return ds, meta | 915bb616e165b55234f140f2ea1577644876ddb7 | 13,734 |
def escape_blog_content(data):
"""Экранирует описание блога."""
if not isinstance(data, binary):
raise ValueError('data should be bytes')
f1 = 0
f2 = 0
    # Find the beginning of the block
div_begin = b'<div class="blog-description">'
f1 = data.find(b'<div class="blog-content text">')
if f1 >= 0:
f1 = data.find(div_begin, f1, f1 + 200)
    # Find the end
if f1 >= 0:
f2 = data.find(b'<ul class="blog-info">', f1 + 1)
if f2 >= 0:
f2 = data.rfind(b'</div>', f1 + 1, f2)
if f1 < 0 or f2 < 0:
        # Not found
return data
body = data[f1 + len(div_begin):f2].strip()
body = html_escape(body)
result = (
data[:f1],
b'<div class="blog-content text" data-escaped="1">',
body,
data[f2:]
)
return b''.join(result) | 285825b253de8ef9b67d7d3f1bdaa7a28f2e918c | 13,735 |
import csv
def read_csv(file_path, delimiter=",", encoding="utf-8"):
"""
Reads a CSV file
Parameters
----------
file_path : str
delimiter : str
encoding : str
Returns
-------
    list
        The parsed CSV rows as a list of lists of strings.
"""
with open(file_path, encoding=encoding) as file:
data_in = list(csv.reader(file, delimiter=delimiter))
return data_in | a4f1da219b0e5d752ff606614e93abbfc3d30597 | 13,736 |
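A quick usage sketch (the file name and delimiter are hypothetical):

rows = read_csv("measurements.csv", delimiter=";")
header, records = rows[0], rows[1:]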
from typing import Tuple
from pathlib import Path
def get_cmd_items(pair: Tuple[str, Path]):
"""Return a list of Albert items - one per example."""
with open(pair[-1], "r") as f:
lines = [li.strip() for li in f.readlines()]
items = []
for i, li in enumerate(lines):
if not li.startswith("- "):
continue
desc = li.lstrip("- ")[:-1]
example_cmd = sanitize_string(
lines[i + 2].strip("`").replace("{{", "").replace("}}", "")
)
items.append(
v0.Item(
id=__prettyname__,
icon=icon_path,
text=example_cmd,
subtext=desc,
actions=[
v0.ClipAction("Copy command", example_cmd),
v0.UrlAction(
"Do a google search",
f'https://www.google.com/search?q="{pair[0]}" command',
),
],
)
)
return items | 92d34ce5af3a3dbe162adf0766382120d0458c46 | 13,737 |
import importlib
def import_activity_class(activity_name, reload=True):
"""
Given an activity subclass name as activity_name,
attempt to lazy load the class when needed
"""
try:
module_name = "activity." + activity_name
importlib.import_module(module_name)
return True
    except ImportError:
return False | b4cea3fad1f08a5758972847d3e03a41f89f223c | 13,738 |
import numpy as np
def rgb2hsv(rgb):
"""
Reverse to :any:`hsv2rgb`
"""
eps = 1e-6
rgb = np.asarray(rgb).astype(float)
maxc = rgb.max(axis=-1)
minc = rgb.min(axis=-1)
v = maxc
s = (maxc - minc) / (maxc + eps)
s[maxc <= eps] = 0.0
rc = (maxc - rgb[:, :, 0]) / (maxc - minc + eps)
gc = (maxc - rgb[:, :, 1]) / (maxc - minc + eps)
bc = (maxc - rgb[:, :, 2]) / (maxc - minc + eps)
h = 4.0 + gc - rc
maxgreen = (rgb[:, :, 1] == maxc)
h[maxgreen] = 2.0 + rc[maxgreen] - bc[maxgreen]
maxred = (rgb[:, :, 0] == maxc)
h[maxred] = bc[maxred] - gc[maxred]
h[minc == maxc] = 0.0
h = (h / 6.0) % 1.0
return np.asarray((h, s, v)) | febb268b1b691897c28447ff00a29785742dfc0c | 13,739 |
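A usage sketch on a random image (assuming numpy is available as np):

img = np.random.rand(32, 32, 3)   # H x W x 3 RGB values in [0, 1]
h, s, v = rgb2hsv(img)            # each channel has shape (32, 32)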
import matplotlib.pyplot as plt
def fig_colorbar(fig, collections, *args, **kwargs):
"""Add colorbar to the right on a figure."""
fig.subplots_adjust(right=0.8)
cax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
cbar = fig.colorbar(collections, cax, *args, **kwargs)
plt.pause(0.1)
return cbar | a3156d24e28407938661c003d30b80a4d57638e6 | 13,740 |
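A minimal usage sketch (assumes matplotlib and numpy; the scatter data is arbitrary):

import numpy as np
fig, ax = plt.subplots()
sc = ax.scatter(np.random.rand(50), np.random.rand(50), c=np.random.rand(50))
cbar = fig_colorbar(fig, sc, label="value")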
def _merge_css_item(item):
"""Transform argument into a single list of string values."""
# Recurse lists and tuples to combine into single list
if isinstance(item, (list, tuple)):
return _merge_css_list(*item)
# Cast to string, be sure to cast falsy values to ''
item = "{}".format(item) if item else ""
# Return as a list
return [item] | c6f0c8769761640d5b0d98168cee1308f3209072 | 13,741 |
def extract_arguments(start, string):
""" Return the list of arguments in the upcoming function parameter closure.
Example:
string (input): '(blocks, threads, 0, THCState_getCurrentStream(state))'
arguments (output):
'[{'start': 1, 'end': 7},
{'start': 8, 'end': 16},
{'start': 17, 'end': 19},
{'start': 20, 'end': 53}]'
"""
arguments = []
closures = {
"<": 0,
"(": 0
}
current_position = start
argument_start_pos = current_position + 1
# Search for final parenthesis
while current_position < len(string):
if string[current_position] == "(":
closures["("] += 1
elif string[current_position] == ")":
closures["("] -= 1
elif string[current_position] == "<":
closures["<"] += 1
elif string[current_position] == ">" and string[current_position - 1] != "-" and closures["<"] > 0:
closures["<"] -= 1
# Finished all arguments
if closures["("] == 0 and closures["<"] == 0:
# Add final argument
arguments.append({"start": argument_start_pos, "end": current_position})
break
# Finished current argument
if closures["("] == 1 and closures["<"] == 0 and string[current_position] == ",":
arguments.append({"start": argument_start_pos, "end": current_position})
argument_start_pos = current_position + 1
current_position += 1
return arguments | 8e6e3fecc0643aa3f55108916a7c6892a96f13aa | 13,742 |
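A usage sketch reusing the example string from the docstring:

call = '(blocks, threads, 0, THCState_getCurrentStream(state))'
for arg in extract_arguments(0, call):
    print(call[arg["start"]:arg["end"]].strip())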
def iter_archive(path, method):
"""Iterate over an archive.
Args:
path: `str`, archive path
method: `tfds.download.ExtractMethod`, extraction method
Returns:
An iterator of `(path_in_archive, f_obj)`
"""
return _EXTRACT_METHODS[method](path) | dda105efc20583f54aed26a55112cfc56380ad68 | 13,744 |
def unfoldPath(cwd, path):
"""
Unfold path applying os.path.expandvars and os.path.expanduser.
    Joins 'path' with 'cwd' at the beginning if 'path' is not an absolute path.
Returns normalized absolute path.
"""
if not path:
return path
path = _expandvars(path)
path = _expanduser(path)
if not _isabs(path):
path = _joinpath(cwd, path)
path = _abspath(path)
return path | ae11a69e6b2a3b0be4b3f266960d6004e99bd261 | 13,745 |
import torch.nn.functional as F
def get_predictions(logits):
"""
Convert logits into softmax predictions
"""
probs = F.softmax(logits, dim=1)
confidence, pred = probs.max(dim=1, keepdim=True)
return confidence, pred, probs | c83a47140534e27bb14991d4c8b2192a2a02cd46 | 13,748 |
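A quick usage sketch with random logits (assuming torch is installed):

import torch
logits = torch.randn(8, 10)                        # batch of 8, 10 classes
confidence, pred, probs = get_predictions(logits)  # shapes: (8, 1), (8, 1), (8, 10)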
def lookup_capacity(lookup_table, environment, cell_type, frequency, bandwidth,
generation, site_density):
"""
Use lookup table to find capacity by clutter environment geotype,
frequency, bandwidth, technology generation and site density.
"""
if (environment, cell_type, frequency, bandwidth, generation) not in lookup_table:
raise KeyError("Combination %s not found in lookup table",
(environment, cell_type, frequency, bandwidth, generation))
density_capacities = lookup_table[
(environment, cell_type, frequency, bandwidth, generation)
]
lowest_density, lowest_capacity = density_capacities[0]
if site_density < lowest_density:
return 0
for a, b in pairwise(density_capacities):
lower_density, lower_capacity = a
upper_density, upper_capacity = b
if lower_density <= site_density and site_density < upper_density:
result = interpolate(
lower_density, lower_capacity,
upper_density, upper_capacity,
site_density
)
return result
# If not caught between bounds return highest capacity
highest_density, highest_capacity = density_capacities[-1]
return highest_capacity | c98b25611cf72cc202fea060063f3020732a5282 | 13,749 |
def contig_slow(fn, num):
"""brute force, quadratic"""
data = parse(fn)
for i in range(len(data)-2):
for j in range(i + 2, len(data)-1):
s = sum(data[i:j])
if s == num:
return min(data[i:j]) + max(data[i:j]) | 69c086c605afc17a63def6e3958e340ddb7a32c3 | 13,750 |
def create_consts(*args) -> superclasses.PyteAugmentedArgList:
"""
Creates a new list of names.
:param args: The args to use.
"""
return _create_validated(*args, name="consts") | b10ab2d0e30e5cfb54c284d4557a98c0b3eb69c6 | 13,751 |
def opts2constr_feat_gen(opts):
"""Creates ConstFeatPlanes functor by calling its constructor with
parameters from opts.
Args:
opts (obj): Namespace object returned by parser with settings.
Returns:
        const_feat_gen (obj): Instantiated ConstrFeatGen functor.
"""
return ConstrFeatGen(
opts.const_feat_fac) | 5f664aae10f0584aca14ff58d2a984c29fd0dc2d | 13,752 |
def cmd_convert_items_to_cheetah_list(list):
"""
Cheetah templates can't iterate over a list of classes, so
converts all data into a Cheetah-friendly list of tuples
(NAME, DESCRIPTION, ENUM, HAS_BIT_OFFSET, BIT_OFFSET, BITS, TYPE, MIN, MAX, DEFAULT)
"""
temp = []
for i in list:
temp.append(cmd_convert_to_tuple(i))
return temp | ff6933dce38d6ddcd74df72ce321d8f16dfd5074 | 13,754 |
import gtsam
def pose223(pose:gtsam.Pose2) -> gtsam.Pose3:
"""convert a gtsam.Pose2 to a gtsam.Pose3
Args:
pose (gtsam.Pose2): the input 2D pose
Returns:
        gtsam.Pose3: the 3D pose with zeros for the unknown values
"""
return gtsam.Pose3(
gtsam.Rot3.Yaw(pose.theta()), gtsam.Point3(pose.x(), pose.y(), 0)
) | 0a6d738d9cbe035be55a884a1523c985d547f25f | 13,755 |
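A usage sketch (assumes the gtsam Python bindings are installed):

p2 = gtsam.Pose2(1.0, 2.0, 0.3)   # x, y, theta
p3 = pose223(p2)                  # Pose3 with yaw 0.3 and z = 0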
import random
import numpy as np
def pivot_calibration_with_ransac(tracking_matrices,
number_iterations,
error_threshold,
concensus_threshold,
early_exit=False
):
"""
Written as an exercise for implementing RANSAC.
:param tracking_matrices: N x 4 x 4 ndarray, of tracking matrices.
:param number_iterations: the number of iterations to attempt.
:param error_threshold: distance in millimetres from pointer position
:param concensus_threshold: the minimum percentage of inliers to finish
:param early_exit: If True, returns model as soon as thresholds are met
:returns: pointer offset, pivot point and RMS Error about centroid of pivot.
:raises: TypeError, ValueError
"""
if number_iterations < 1:
raise ValueError("The number of iterations must be > 1")
if error_threshold < 0:
raise ValueError("The error threshold must be a positive distance.")
if concensus_threshold < 0 or concensus_threshold > 1:
raise ValueError("The concensus threshold must be [0-1] as percentage")
if not isinstance(tracking_matrices, np.ndarray):
raise TypeError("tracking_matrices is not a numpy array'")
number_of_matrices = tracking_matrices.shape[0]
population_of_indices = range(number_of_matrices)
minimum_matrices_required = 3
highest_number_of_inliers = -1
best_model = None
best_rms = -1
for iter_counter in range(number_iterations):
indexes = random.sample(population_of_indices,
minimum_matrices_required)
sample = tracking_matrices[indexes]
try:
model, _ = pivot_calibration(sample)
except ValueError:
print("RANSAC, iteration " + str(iter_counter) + ", failed.")
continue
# Need to evaluate the number of inliers.
# Slow, but it's written as a teaching exercise.
world_point = model[3:6]
number_of_inliers = 0
inlier_indices = []
for matrix_counter in range(number_of_matrices):
offset = np.vstack((model[0:3], 1))
transformed_point = tracking_matrices[matrix_counter] @ offset
diff = world_point - transformed_point[0:3]
norm = np.linalg.norm(diff)
if norm < error_threshold:
number_of_inliers = number_of_inliers + 1
inlier_indices.append(matrix_counter)
percentage_inliers = number_of_inliers / number_of_matrices
# Keep the best model so far, based on the highest number of inliers.
if percentage_inliers > concensus_threshold \
and number_of_inliers > highest_number_of_inliers:
highest_number_of_inliers = number_of_inliers
inlier_matrices = tracking_matrices[inlier_indices]
best_model, best_rms = pivot_calibration(inlier_matrices)
# Early exit condition, as soon as we find model with enough fit.
if percentage_inliers > concensus_threshold and early_exit:
return best_model, best_rms
if best_model is None:
raise ValueError("Failed to find a model using RANSAC.")
print("RANSAC Pivot, from " + str(number_of_matrices)
+ " matrices, used " + str(highest_number_of_inliers)
+ " matrices, with error threshold = " + str(error_threshold)
+ " and consensus threshold = " + str(concensus_threshold)
)
return best_model, best_rms | 0ce7c7bd8afbc88093793601da2b0333b40766cb | 13,756 |
def find_companies_name_dict():
"""
    Finds company names and addresses
:return: a dict with resource name eg.area of companies and url of available data
"""
base = "https://data.gov.ro/api/3/action/"
query = "Date-de-identificare-platitori"
address = url_build.build_url_package_query(base, query)
# dictionary with available files and download url
data_platitori = {}
# check for valid url
packages_exists = url_response.valid_url(address)
if packages_exists:
# find available packages
avlb_package = url_response.get_avlb_package(address)
# resources are at ['results'][0]['resources']
resources = avlb_package['results'][0]['resources']
# num avl resource
num_resources = avlb_package['results'][0]['num_resources']
# sanity check
count = 0
# loop over list and build a dict with name of resource and url
for x in resources:
package_name = x['name']
package_url = x['url']
temp_dict = {package_name: package_url}
data_platitori.update(temp_dict)
count += 1
# sanity check
if count == num_resources:
print("all resources founded!")
return data_platitori
raise Exception("Invalid query to find companies names") | 74526747f45a4c5491e4778759baca53a638c97f | 13,757 |
import typing
def error_to_response(request: web.Request,
error: typing.Union[Error, ErrorList]):
"""
Convert an :class:`Error` or :class:`ErrorList` to JSON API response.
:arg ~aiohttp.web.Request request:
The web request instance.
:arg typing.Union[Error, ErrorList] error:
The error, which is converted into a response.
:rtype: ~aiohttp.web.Response
"""
if not isinstance(error, (Error, ErrorList)):
raise TypeError('Error or ErrorList instance is required.')
return jsonapi_response(
{
'errors':
[error.as_dict] if isinstance(error, Error) else error.as_dict,
'jsonapi': request.app[JSONAPI]['jsonapi']
},
status=error.status
) | 792c3fccd8d7fee708d850169fd943010e92ab05 | 13,758 |
def read(handle):
"""read(handle)"""
record = Record()
__read_version(record, handle)
__read_database_and_motifs(record, handle)
__read_section_i(record, handle)
__read_section_ii(record, handle)
__read_section_iii(record, handle)
return record | 90921ec1779c313505a838863509838bd858d0b7 | 13,759 |
import json
def validate_telegam():
"""Validate telegram token and chat ID
"""
configs = InitialConfig()
confs = ["chat_id", "bot_token"]
conf_dict = {}
if request.method == "GET":
for conf in confs:
conf_dict[conf] = getattr(configs, conf)
conf_json = json.dumps(conf_dict)
return conf_json
if request.headers.get("Content-Type") == "application/json":
for conf in confs:
value = request.json.get(conf)
if not value:
return HTTPResponse(f"{conf} should have a value", 400)
elif not isinstance(value, str):
return HTTPResponse(f"{conf} should be str", 400)
else:
setattr(configs, conf, value)
# Check telegram bot token
try:
bot = Bot(request.json["bot_token"])
bot.sendMessage(request.json["chat_id"], "Configured")
except (InvalidToken, BadRequest, Unauthorized) as error:
if error.message == "Unauthorized":
error.message += ": Invalid Token"
return HTTPResponse(error.message, 400)
configs.save()
return HTTPResponse("Configured", 200) | f597d75672639dc2a39eb100f7221c508f62cf06 | 13,760 |
def hour(e):
"""
:rtype: Column
"""
return col(Hour(ensure_column(e))) | 492d9e21f2f7c3fd6107dd4000c8273efaa0357c | 13,761 |
import numpy as np
def infer_gaussian(data):
"""
Return (amplitude, x_0, y_0, width), where width - rough estimate of
gaussian width
"""
amplitude = data.max()
x_0, y_0 = np.unravel_index(np.argmax(data), np.shape(data))
row = data[x_0, :]
column = data[:, y_0]
x_0 = float(x_0)
y_0 = float(y_0)
dx = len(np.where(row - amplitude/2 > 0)[0])
dy = len(np.where(column - amplitude/2 > 0)[0])
width = np.sqrt(dx ** 2. + dy ** 2.)
return amplitude, x_0, y_0, width | 784e88e5cd58def8467cbe0a851b37cc1fefe9dd | 13,762 |
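A usage sketch on a synthetic Gaussian blob (assuming numpy is available as np):

yy, xx = np.mgrid[0:64, 0:64]
blob = np.exp(-((xx - 20.0) ** 2 + (yy - 30.0) ** 2) / (2 * 5.0 ** 2))
amplitude, x_0, y_0, width = infer_gaussian(blob)  # amplitude ~= 1.0, (x_0, y_0) ~= (30, 20)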
import math
from collections import defaultdict
def extract_freq(bins=5, **kwargs):
"""
Extract frequency bin features.
Args:
bins (int): The number of frequency bins (besides OOV)
Returns:
(function): A feature extraction function that returns the log of the \
count of query tokens within each frequency bin.
"""
def _extractor(query, resources):
tokens = query.normalized_tokens
stemmed_tokens = query.stemmed_tokens
freq_dict = resources[WORD_FREQ_RSC]
max_freq = freq_dict.most_common(1)[0][1]
freq_features = defaultdict(int)
for idx, tok in enumerate(tokens):
tok = mask_numerics(tok)
if kwargs.get(ENABLE_STEMMING, False):
stemmed_tok = stemmed_tokens[idx]
stemmed_tok = mask_numerics(stemmed_tok)
freq = freq_dict.get(tok, freq_dict.get(stemmed_tok, 0))
else:
freq = freq_dict.get(tok, 0)
if freq < 2:
freq_features["in_vocab:OOV"] += 1
else:
# Bin the frequency with break points at
# half max, a quarter max, an eighth max, etc.
freq_bin = int(math.log(max_freq, 2) - math.log(freq, 2))
if freq_bin < bins:
freq_features["in_vocab:IV|freq_bin:{}".format(freq_bin)] += 1
else:
freq_features["in_vocab:IV|freq_bin:{}".format(bins)] += 1
q_len = float(len(tokens))
for k in freq_features:
# sublinear
freq_features[k] = math.log(freq_features[k] + 1, 2)
# ratio
freq_features[k] /= q_len
return freq_features
return _extractor | b07f2f1810a26c2d04366d5516aac0ca79b547bb | 13,763 |
import typing
def create_private_key_params(key_type: str) -> typing.Type[PrivateKeyParams]:
"""Returns the class corresponding to private key parameters objects of the
given key type name.
Args:
key_type
The name of the OpenSSH key type.
Returns:
The subclass of :any:`PrivateKeyParams` corresponding to the key type
name.
Raises:
KeyError: There is no subclass of :any:`PrivateKeyParams` corresponding
to the given key type name.
"""
return _KEY_TYPE_MAPPING[key_type].privateKeyParamsClass | 6702f93f8cd8dc3fd104db5d63efd7db4bbaa38e | 13,764 |
import json
import requests
def get_response(msg):
"""
    Call the Turing Robot open API.
    :param msg: the text message entered by the user
    :return: string or None
"""
apiurl = "http://openapi.tuling123.com/openapi/api/v2"
    # Build the request parameter payload
params = {"reqType": 0,
"perception": {
"inputText": {
"text": msg
}
},
"userInfo": {
"apiKey": "ca7bf19ac0e644c38cfbe9d6fdc08de1",
"userId": "439608"
}}
    # Serialize the parameters to a JSON string
content = json.dumps(params)
    # Send the POST request
r = requests.post(url=apiurl, data=content, verify=False).json()
print("r = " + str(r))
    # Parse the JSON response; a sample result looks like:
# {'emotion':{
# 'robotEmotion': {'a': 0, 'd': 0, 'emotionId': 0, 'p': 0},
# 'userEmotion': {'a': 0, 'd': 0, 'emotionId': 10300, 'p': 0}
# },
# 'intent': {
# 'actionName': '',
# 'code': 10004,
# 'intentName': ''
# },
# 'results': [{'groupType': 1, 'resultType': 'text', 'values': {'text': '欢迎来到本机器人的地盘。'}}]}
code = r['intent']['code']
if code == 10004 or code == 10008:
message = r['results'][0]['values']['text']
return message
return None | 9a542b56a3ed3db8b8a9306ea3d425054a4ca64b | 13,765 |
import torch
import torch.nn.functional as F
def lm_sample_with_constraints(lm_model,
max_decode_steps,
use_cuda,
device,
batch_size=1,
alpha_0=1,
alpha=1,
beta=0,
repeat_penalty=0,
history_penalty=0,
history_penalty_beta=0,
penalty_vocab_start=-1,
penalty_vocab_end=-1,
prefix=None,
gamma=1,
normalize="none",
top_k=-1,
top_k0=-1,
top_p=-1,
top_p0=-1,
eos=None,
need_mask_unk=True,
return_states=False):
"""
"""
if eos is None:
eos = lm_model.EOS
dec_states = lm_model.init_search()
search_states = init_search(lm_model, batch_size)
if use_cuda == True:
search_states = nested_to_cuda(search_states, device)
y = search_states[0]
log_probs = search_states[1]
finished = search_states[2]
mask_finished = search_states[3]
hypothesis = search_states[4]
history_log_probs = search_states[5]
gamma = torch.tensor(gamma, dtype=torch.float, device=y.device)
mask_unk = None
if need_mask_unk == True:
mask_unk = get_single_token_mask(lm_model.trg_vocab_size,
lm_model.UNK,
lm_model.MIN_LOGITS)
if use_cuda == True:
mask_unk = nested_to_cuda(mask_unk, device)
steps = 0
trg_seq_len = 0
vocab_size = lm_model.trg_vocab_size
max_decode_steps = min(max_decode_steps, lm_model.trg_max_len - trg_seq_len)
while not finished.all() and steps < max_decode_steps:
outputs = lm_model.decoder._step(steps,
dec_states,
y)
dec_states, logits = outputs[0:2]
if mask_unk is not None:
logits += mask_unk
if steps > 1 and repeat_penalty < 0:
logits += get_multi_token_mask(hypothesis,
vocab_size,
-2,
steps,
repeat_penalty, 0,
penalty_vocab_start,
penalty_vocab_end)
if steps > 2 and history_penalty < 0:
logits += get_multi_token_mask(hypothesis,
vocab_size,
0,
-2,
history_penalty,
history_penalty_beta,
penalty_vocab_start,
penalty_vocab_end)
mask = finished.type(torch.float)
mask_logits = logits * (1 - mask) + mask_finished * mask
_log_probs = F.log_softmax(logits, 1)
temp = alpha_0
if steps > 0:
temp = alpha + steps * beta
if prefix is not None and steps < prefix.size(1):
is_prefix = (prefix[:,steps:steps+1]).ne(lm_model.PAD).float()
prefix_mask = torch.zeros_like(mask_logits)
prefix_mask.scatter_(1, prefix[:, steps:steps+1],
lm_model.MAX_LOGITS)
mask_logits += (prefix_mask * is_prefix)
indice = top_k_top_p_sampling(mask_logits, -1, -1)
elif steps == 0:
indice = top_k_top_p_sampling(mask_logits, top_k0, top_p0, temp)
else:
indice = top_k_top_p_sampling(mask_logits, top_k, top_p, temp)
y = (indice % vocab_size).view(-1, 1)
finished = (finished | y.eq(eos).byte())
hypothesis = torch.cat([hypothesis, y], 1)
_log_probs = torch.gather(_log_probs, 1, indice)
log_probs = log_probs + _log_probs * (1 - mask)
history_log_probs = torch.cat([history_log_probs, _log_probs], 1)
steps += 1
trg_seq_len += 1
hyp_len = torch.sum(hypothesis.ne(lm_model.PAD).float(), 1)
normalized_score = \
normalize_log_probs(log_probs, hyp_len, gamma, normalize)
outputs = [hypothesis, normalized_score]
if return_states == True:
outputs = [hypothesis,
normalized_score,
history_log_probs,
dec_states,
y,
log_probs,
finished,
mask_finished]
return outputs | cbccd2c0a2b91fa3e5ff5efb8b394fe7418f5b8b | 13,766 |
import numpy as np
import torch
from tqdm import tqdm
def validate_official(args, data_loader, model, global_stats=None):
"""Run one full official validation. Uses exact spans and same
exact match/F1 score computation as in the SQuAD script.
Extra arguments:
offsets: The character start/end indices for the tokens in each context.
texts: Map of qid --> raw text of examples context (matches offsets).
answers: Map of qid --> list of accepted answers.
"""
eval_time = Timer()
# Run through examples
examples = 0
map = AverageMeter()
mrr = AverageMeter()
prec_1 = AverageMeter()
prec_3 = AverageMeter()
prec_5 = AverageMeter()
with torch.no_grad():
pbar = tqdm(data_loader)
for ex in pbar:
ids, batch_size = ex['ids'], ex['batch_size']
scores = model.predict(ex)
predictions = np.argsort(-scores.cpu().numpy()) # sort in descending order
labels = ex['label'].numpy()
map.update(MAP(predictions, labels))
mrr.update(MRR(predictions, labels))
prec_1.update(precision_at_k(predictions, labels, 1))
prec_3.update(precision_at_k(predictions, labels, 3))
prec_5.update(precision_at_k(predictions, labels, 5))
if global_stats is None:
pbar.set_description('[testing ... ]')
else:
pbar.set_description("%s" % 'Epoch = %d [validating... ]' % global_stats['epoch'])
examples += batch_size
result = dict()
result['map'] = map.avg
result['mrr'] = mrr.avg
result['prec@1'] = prec_1.avg
result['prec@3'] = prec_3.avg
result['prec@5'] = prec_5.avg
if global_stats is None:
logger.info('test results: MAP = %.2f | MRR = %.2f | Prec@1 = %.2f | ' %
(result['map'], result['mrr'], result['prec@1']) +
'Prec@3 = %.2f | Prec@5 = %.2f | examples = %d | ' %
(result['prec@3'], result['prec@5'], examples) +
'time elapsed = %.2f (s)' %
(eval_time.time()))
else:
logger.info('valid official: Epoch = %d | MAP = %.2f | ' %
(global_stats['epoch'], result['map']) +
'MRR = %.2f | Prec@1 = %.2f | Prec@3 = %.2f | ' %
(result['mrr'], result['prec@1'], result['prec@3']) +
'Prec@5 = %.2f | examples = %d | valid time = %.2f (s)' %
(result['prec@5'], examples, eval_time.time()))
return result | 09385e491c25ac238aebabe7d887f73b4c0bd091 | 13,767 |
def tuple_list_to_lua(tuple_list):
"""Given a list of tuples, return a lua table of tables"""
def table(it):
return "{" + ",".join(map(str, it)) + "}"
return table(table(t) for t in tuple_list) | 71ec1a29f5e23b8bf82867617fe157fbba4a2332 | 13,768 |
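A quick usage sketch:

print(tuple_list_to_lua([(1, 2), (3, 4)]))  # -> {{1,2},{3,4}}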
def reset_user_messages(request: Request):
"""
For given user reset his notifications.
"""
profile: Profile = get_object_or_404(Profile, user=request.user)
profile.messages = 0
profile.save()
return Response(status=status.HTTP_200_OK) | 628347dea707b0bd2ecc63cc004a3f62cb85e967 | 13,769 |
import functools
def define_scope(function, scope=None, *args, **kwargs):
"""
A decorator for functions that define TensorFlow operations. The wrapped
function will only be executed once. Subsequent calls to it will directly
return the result so that operations are added to the graph only once.
The operations added by the function live within a tf.variable_scope(). If
this decorator is used with arguments, they will be forwarded to the
variable scope. The scope name defaults to the name of the wrapped
function.
"""
attribute = '_cache_' + function.__name__
name = scope or function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
with tf.variable_scope(name, *args, **kwargs):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator | 988f2f711dc227bfe8df5c7074d354c37d079fdb | 13,770 |
from typing import Union
from typing import List
from typing import Dict
import yaml
from collections import OrderedDict
def load_yaml(fname: str) -> Union[List, Dict]:
"""Load a YAML file."""
try:
with open(fname, encoding='utf-8') as conf_file:
# If configuration file is empty YAML returns None
# We convert that to an empty dict
return yaml.load(conf_file, Loader=SafeLineLoader) or OrderedDict()
except yaml.YAMLError as exc:
_LOGGER.error(exc)
raise HomeAssistantError(exc)
except UnicodeDecodeError as exc:
_LOGGER.error("Unable to read file %s: %s", fname, exc)
raise HomeAssistantError(exc) | 5fd0b9d2dea7d07b7bb98f6a9ae3ce98be3962e0 | 13,771 |
def fancy_vector(v):
"""
Returns a given 3-vector or array in a cute way on the shell, if you
use 'print' on the return value.
"""
return "\n / %5.2F \\\n" % (v[0]) + \
" | %5.2F |\n" % (v[1]) + \
" \\ %5.2F /\n" % (v[2]) | 2340f22aa87da00abad30b9946c374f34b38496d | 13,772 |
import RNA
from itertools import chain
def findpath_split(seq, ss1, ss2, md, th = 5, w = None):
""" Calculate findpath barriers for smaller components.
Args:
seq: RNA sequence.
ss1: Structure 1.
ss2: Structure 2.
md: ViennaRNA model details.
th: Threshold of how many basepairs must change for an independent findpath run. Defaults to 5.
w: Findpath width. Defaults to None.
Returns:
path, barrier: The folding path and the barrier height.
WARNING: If path splitting actually took place, then energy values
given in the path data are only relative to the starting structure.
"""
pt1 = make_pair_table(ss1, base = 0, chars = list('.x'))
pt2 = make_pair_table(ss2, base = 0, chars = list('.x'))
mindiff = None
recurse = None
for ij in chain(common_exterior_bases(pt1, pt2),
common_basepairs(pt1, pt2)):
(i, j) = ij if isinstance(ij, tuple) else (ij, None)
st1O, st1I = split_struct(ss1, i, j, spacer = '...')
st2O, st2I = split_struct(ss2, i, j, spacer = '...')
do = RNA.bp_distance(st1O, st2O)
if do < th: continue
di = RNA.bp_distance(st1I, st2I)
if di < th: continue
diff = abs(di-do)
if mindiff is None or diff < mindiff:
mindiff = diff
seqO, seqI = split_struct(seq, i, j, spacer = 'NNN')
recurse = ((i, j),
(seqO, st1O, st2O),
(seqI, st1I, st2I))
elif mindiff is not None and diff > mindiff:
# No need to check the rest if we are getting worse.
break
if mindiff is not None:
pathO, _ = findpath_split(*recurse[1], md, th, w)
pathI, _ = findpath_split(*recurse[2], md, th, w)
return findpath_merge(pathO, pathI, *recurse[0])
else:
fpw = 4 * RNA.bp_distance(ss1, ss2) if w is None else w
return call_findpath(seq, ss1, ss2, md, w = fpw) | 2d52102df31dd014ac60e28c7258bff833353b6a | 13,773 |
def get_root_relative_url(url_path):
"""Remove the root page slug from the URL path"""
return _clean_rel_url('/'.join(url_path.split('/')[2:])) | 7aca9c0ec8856615fe1777117f44a259d7b597c7 | 13,774 |
def exclusion_windows_matching(match_peaks):
"""
    Discard the occurrences of matching and non-matching ions when they are found in the window
(+-losses_window_removal) around M-xx or free bases ions
"""
output_dic = match_peaks
for key in match_peaks:
if match_peaks[key]:
for t in match_peaks[key]:
mass_losses_list, new_list = find_losses_freebases(match_peaks[key][t][7:]), []
for ion in match_peaks[key][t][7:]:
# Keep ion losses and free bases matched in the MS2_matches list
if 'M-' not in ion[1] and len(ion[1].split('(')[0]) != 1:
flag, mz_ion = 1, np.float64(ion[2])
for mass_loss in mass_losses_list:
# Add the MS2 offset
mass_loss_offseted = mass_loss + ppm_range(mass_loss, MS2_ppm_offset)
# Check and discard any sequencing ion is found in the M-xx exclusion window
if mass_loss_offseted - args.losses_window_removal <= \
mz_ion <= mass_loss_offseted + args.losses_window_removal:
flag = 0
break
if flag == 1:
new_list.append(ion)
else:
new_list.append(ion)
output_dic[key].update({t: match_peaks[key][t][:7] + new_list})
return output_dic | 9c2b5bcdb283b197102d50fdd2aaa8eb49e2fc3b | 13,776 |
def any_of(elements):
"""
Check to see if the argument is contained in a list of possible elements.
:param elements: The elements to check the argument against in the predicate.
:return: A predicate to check if the argument is a constituent element.
"""
def predicate(argument):
return argument in elements
return predicate | adacf8fd632d25452d22dab0a8a439021083ec83 | 13,777 |
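A quick usage sketch:

is_vowel = any_of("aeiou")
print(is_vowel("a"), is_vowel("z"))  # True False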
def find_year(films_lst: list, year: int):
""" Filter list of films by given year """
filtered_films_lst = [line for line in films_lst if line[1] == str(year)]
return filtered_films_lst | f4c11e09e76831afcf49154234dd57044536bce1 | 13,778 |
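A quick usage sketch with made-up film records:

films = [["Inception", "2010"], ["Arrival", "2016"]]
print(find_year(films, 2010))  # [['Inception', '2010']]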
import numpy as np
import pandas as pd
def func_BarPS(HA_Open, HA_Close, HA_PS_Lookback, PS_pct_level=[0.35, 0.5, 0.95, 0.97], combine=False):
"""
    0. This function calculates the price-trend number of an HA bar: it looks back HA_PS_Lookback HA bars and,
    according to the distribution of those previous bars, assigns the current bar a range bin (i.e. -4,-3,-2,-1,0,1,2,3,4).
    1. This function has 5 arguments (two optional) and returns 1 DataFrame as output.
    2. Input arguments including:
(1) HA_Open: Dataframe
(2) HA_Close: DataFrame
(3) HA_PS_Lookback: int, number of bars to lookback.
(4) PS_pct_level: list, optional, default value is [0.35, 0.5, 0.95, 0.97]
(5) combine: boolean, optional, default value is False, calculating the up bar and down bar separately,
while combine=True calculates the up bar and down bar combined.
3. Output is 1 DataFrame
        (1) HA_PS: Shown as -4,-3,-2,-1,0,1,2,3,4, indicating the size of HA bars.
"""
# Initialize:
HA_num = len(HA_Open)
HA_PS = np.zeros_like(HA_Open)
HA_Open = HA_Open.values
HA_Close = HA_Close.values
# Main:
for i in range(HA_PS_Lookback, HA_num):
HA_Open_lb = HA_Open [i-HA_PS_Lookback:i]
HA_Close_1b = HA_Close[i-HA_PS_Lookback:i]
HA_PS_positive_level, HA_PS_negative_level = func_PS_Level(HA_Open_lb, HA_Close_1b, PS_pct_level, combine)
HA_range = HA_Close[i] - HA_Open[i]
if HA_range > 0:
HA_PS_temp = np.where(HA_range <= HA_PS_positive_level)[0] + 1
if len(HA_PS_temp) != 0:
HA_PS[i] = HA_PS_temp[0] - 1
else:
HA_PS[i] = len(HA_PS_positive_level) # -1
if HA_range < 0:
HA_PS_temp = np.where(HA_range >= HA_PS_negative_level)[0] + 1
if len(HA_PS_temp) != 0:
HA_PS[i] = -HA_PS_temp[0] + 1
else:
HA_PS[i] = -len(HA_PS_negative_level) # +1
HA_PS_df = pd.DataFrame(HA_PS, columns=['PS'])
return HA_PS_df | 8a57de8ee4e832afd6327afc808668d227bc2592 | 13,779 |
from typing import List
def filter_whitespace(stream: List[Part]) -> List[Part]:
"""Remove whitespace tokens"""
return flu(stream).filter(lambda x: x.token != Token.WHITESPACE).collect() | aa3b8d109b0d85db7c3aa286858426276afb80ba | 13,780 |
def merge_partial_dicts(interfaces_dict, partials_dict):
"""Merges partial interface into non-partial interface.
Args:
interfaces_dict: A dict of the non-partial interfaces.
partial_dict: A dict of partial interfaces.
Returns:
A merged dictionary of |interface_dict| with |partial_dict|.
"""
for interface_name, partial in partials_dict.iteritems():
interface = interfaces_dict.get(interface_name)
if not interface:
raise Exception('There is a partial interface, but the corresponding non-partial interface was not found.')
for member in _MEMBERS:
interface[member].extend(partial.get(member))
interface.setdefault(_PARTIAL_FILEPATH, []).append(partial[_FILEPATH])
return interfaces_dict | 7efc47325e1af5c06b19c1bba02ec8b53d9473e0 | 13,781 |
def gen_code_def_part(metadata):
"""生成代码中定义类的部分。
"""
class_def_dict = validate(metadata)
class_def_list = list(class_def_dict.values())
code = templates.t_def_all_class.render(class_def_list=class_def_list)
return code | df01fb69984a0ba471a6e5ac24bbecb0a622dd1b | 13,782 |
def clang_plusplus_frontend(input_file, args):
"""Generate LLVM IR from C++ language source(s)."""
compile_command = default_clang_compile_command(args)
compile_command[0] = llvm_exact_bin('clang++')
return compile_to_bc(input_file, compile_command, args) | 1b2701f3e0fac240843b302dd2056bac857ecb74 | 13,783 |
from django.db import connections, router, transaction
from django.db.models import Model, Q, signals
from django.db.models.query import QuerySet
def create_forward_many_to_many_manager(superclass, rel, reverse):
"""
Create a manager for the either side of a many-to-many relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-many relations.
"""
class ManyRelatedManager(superclass):
def __init__(self, instance=None):
super().__init__()
self.instance = instance
if not reverse:
self.model = rel.model
self.query_field_name = rel.field.related_query_name()
self.prefetch_cache_name = rel.field.name
self.source_field_name = rel.field.m2m_field_name()
self.target_field_name = rel.field.m2m_reverse_field_name()
self.symmetrical = rel.symmetrical
else:
self.model = rel.related_model
self.query_field_name = rel.field.name
self.prefetch_cache_name = rel.field.related_query_name()
self.source_field_name = rel.field.m2m_reverse_field_name()
self.target_field_name = rel.field.m2m_field_name()
self.symmetrical = False
self.through = rel.through
self.reverse = reverse
self.source_field = self.through._meta.get_field(self.source_field_name)
self.target_field = self.through._meta.get_field(self.target_field_name)
self.core_filters = {}
self.pk_field_names = {}
for lh_field, rh_field in self.source_field.related_fields:
core_filter_key = '%s__%s' % (self.query_field_name, rh_field.name)
self.core_filters[core_filter_key] = getattr(instance, rh_field.attname)
self.pk_field_names[lh_field.name] = rh_field.name
self.related_val = self.source_field.get_foreign_related_value(instance)
if None in self.related_val:
raise ValueError('"%r" needs to have a value for field "%s" before '
'this many-to-many relationship can be used.' %
(instance, self.pk_field_names[self.source_field_name]))
# Even if this relation is not to pk, we require still pk value.
# The wish is that the instance has been already saved to DB,
# although having a pk value isn't a guarantee of that.
if instance.pk is None:
raise ValueError("%r instance needs to have a primary key value before "
"a many-to-many relationship can be used." %
instance.__class__.__name__)
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_forward_many_to_many_manager(manager.__class__, rel, reverse)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def _build_remove_filters(self, removed_vals):
filters = Q(**{self.source_field_name: self.related_val})
# No need to add a subquery condition if removed_vals is a QuerySet without
# filters.
removed_vals_filters = (not isinstance(removed_vals, QuerySet) or
removed_vals._has_filters())
if removed_vals_filters:
filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})
if self.symmetrical:
symmetrical_filters = Q(**{self.target_field_name: self.related_val})
if removed_vals_filters:
symmetrical_filters &= Q(
**{'%s__in' % self.source_field_name: removed_vals})
filters |= symmetrical_filters
return filters
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
queryset._add_hints(instance=self.instance)
if self._db:
queryset = queryset.using(self._db)
return queryset._next_is_sticky().filter(**self.core_filters)
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name)
except (AttributeError, KeyError):
pass # nothing to clear from cache
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {'%s__in' % self.query_field_name: instances}
queryset = queryset._next_is_sticky().filter(**query)
# M2M: need to annotate the query in order to get the primary model
# that the secondary model was actually related to. We know that
# there will already be a join on the join table, so we can just add
# the select.
# For non-autocreated 'through' models, can't assume we are
# dealing with PK values.
fk = self.through._meta.get_field(self.source_field_name)
join_table = fk.model._meta.db_table
connection = connections[queryset.db]
qn = connection.ops.quote_name
queryset = queryset.extra(select={
'_prefetch_related_val_%s' % f.attname:
'%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields})
return (
queryset,
lambda result: tuple(
getattr(result, '_prefetch_related_val_%s' % f.attname)
for f in fk.local_related_fields
),
lambda inst: tuple(
f.get_db_prep_value(getattr(inst, f.attname), connection)
for f in fk.foreign_related_fields
),
False,
self.prefetch_cache_name,
False,
)
def add(self, *objs, through_defaults=None):
self._remove_prefetched_objects()
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
self._add_items(
self.source_field_name, self.target_field_name, *objs,
through_defaults=through_defaults,
)
# If this is a symmetrical m2m relation to self, add the mirror
# entry in the m2m table. `through_defaults` aren't used here
# because of the system check error fields.E332: Many-to-many
# fields with intermediate tables must not be symmetrical.
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs)
add.alters_data = True
def remove(self, *objs):
self._remove_prefetched_objects()
self._remove_items(self.source_field_name, self.target_field_name, *objs)
remove.alters_data = True
def clear(self):
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
signals.m2m_changed.send(
sender=self.through, action="pre_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db,
)
self._remove_prefetched_objects()
filters = self._build_remove_filters(super().get_queryset().using(db))
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(
sender=self.through, action="post_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db,
)
clear.alters_data = True
def set(self, objs, *, clear=False, through_defaults=None):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, through_defaults=through_defaults)
else:
old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True))
new_objs = []
for obj in objs:
fk_val = (
self.target_field.get_foreign_related_value(obj)[0]
if isinstance(obj, self.model) else obj
)
if fk_val in old_ids:
old_ids.remove(fk_val)
else:
new_objs.append(obj)
self.remove(*old_ids)
self.add(*new_objs, through_defaults=through_defaults)
set.alters_data = True
def create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj, through_defaults=through_defaults)
return new_obj
create.alters_data = True
def get_or_create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj, through_defaults=through_defaults)
return obj, created
get_or_create.alters_data = True
def update_or_create(self, *, through_defaults=None, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj, through_defaults=through_defaults)
return obj, created
update_or_create.alters_data = True
def _add_items(self, source_field_name, target_field_name, *objs, through_defaults=None):
# source_field_name: the PK fieldname in join table for the source object
# target_field_name: the PK fieldname in join table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
through_defaults = through_defaults or {}
# If there aren't any objects, there is nothing to do.
if objs:
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError(
'Cannot add "%r": instance is on database "%s", value is on database "%s"' %
(obj, self.instance._state.db, obj._state.db)
)
fk_val = self.through._meta.get_field(
target_field_name).get_foreign_related_value(obj)[0]
if fk_val is None:
raise ValueError(
'Cannot add "%r": the value for field "%s" is None' %
(obj, target_field_name)
)
new_ids.add(fk_val)
elif isinstance(obj, Model):
raise TypeError(
"'%s' instance expected, got %r" %
(self.model._meta.object_name, obj)
)
else:
new_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
vals = (self.through._default_manager.using(db)
.values_list(target_field_name, flat=True)
.filter(**{
source_field_name: self.related_val[0],
'%s__in' % target_field_name: new_ids,
}))
new_ids.difference_update(vals)
with transaction.atomic(using=db, savepoint=False):
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(
sender=self.through, action='pre_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db,
)
# Add the ones that aren't there already
self.through._default_manager.using(db).bulk_create([
self.through(**through_defaults, **{
'%s_id' % source_field_name: self.related_val[0],
'%s_id' % target_field_name: obj_id,
})
for obj_id in new_ids
])
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(
sender=self.through, action='post_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db,
)
def _remove_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK colname in join table for the source object
# target_field_name: the PK colname in join table for the target object
# *objs - objects to remove. Either object instances, or primary
# keys of object instances.
if not objs:
return
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
fk_val = self.target_field.get_foreign_related_value(obj)[0]
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
# Send a signal to the other end if need be.
signals.m2m_changed.send(
sender=self.through, action="pre_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db,
)
target_model_qs = super().get_queryset()
if target_model_qs._has_filters():
old_vals = target_model_qs.using(db).filter(**{
'%s__in' % self.target_field.target_field.attname: old_ids})
else:
old_vals = old_ids
filters = self._build_remove_filters(old_vals)
self.through._default_manager.using(db).filter(filters).delete()
signals.m2m_changed.send(
sender=self.through, action="post_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db,
)
return ManyRelatedManager | c9c45ae0eca4a913affab0ed832a0568e46d9a4c | 13,784 |
def grp_render_dashboard_module(context, module, index=None, subindex=None):
"""
Template tag that renders a given dashboard module, it takes a
``DashboardModule`` instance as first parameter and an integer ``index`` as
second parameter, that is the index of the module in the dashboard.
"""
module.init_with_context(context)
context.update({
'template': module.template,
'module': module,
'index': index,
'subindex': subindex,
'admin_url': reverse('%s:index' % get_admin_site_name(context)),
})
return context | 515bf427c4c39dc28479f6fe7121dc1cb542745e | 13,785 |
def serialize_routing(value, explicit_type=None):
"""Custom logic to find matching serialize implementation and
returns it's unique registration string key
:param value: instance to serialize
:param explicit_type: explicit serialization type for value
:return: str key to find proper serialize implementation
"""
value_type = data_type(value, explicit_type)
if DICT_DATA_TYPE.match(value_type):
return "dict"
if LIST_DATA_TYPE.match(value_type):
return "list"
if TUPLE_DATA_TYPE.match(value_type):
return "tuple"
return value_type | 792cb24fc68060fe7e24f064f411752c5d787c3d | 13,786 |
import numpy as np
def get_projection_matricies(az, el, distance_ratio, roll = 0, focal_length=35, img_w=137, img_h=137):
"""
Calculate 4x3 3D to 2D projection matrix given viewpoint parameters.
Code from "https://github.com/Xharlie/DISN"
"""
F_MM = focal_length # Focal length
SENSOR_SIZE_MM = 32.
PIXEL_ASPECT_RATIO = 1. # pixel_aspect_x / pixel_aspect_y
RESOLUTION_PCT = 100.
SKEW = 0.
CAM_MAX_DIST = 1.75
CAM_ROT = np.asarray([[1.910685676922942e-15, 4.371138828673793e-08, 1.0],
[1.0, -4.371138828673793e-08, -0.0],
[4.371138828673793e-08, 1.0, -4.371138828673793e-08]])
# Calculate intrinsic matrix.
scale = RESOLUTION_PCT / 100
# print('scale', scale)
f_u = F_MM * img_w * scale / SENSOR_SIZE_MM
f_v = F_MM * img_h * scale * PIXEL_ASPECT_RATIO / SENSOR_SIZE_MM
# print('f_u', f_u, 'f_v', f_v)
u_0 = img_w * scale / 2
v_0 = img_h * scale / 2
K = np.matrix(((f_u, SKEW, u_0), (0, f_v, v_0), (0, 0, 1)))
# Calculate rotation and translation matrices.
# Step 1: World coordinate to object coordinate.
sa = np.sin(np.radians(-az))
ca = np.cos(np.radians(-az))
se = np.sin(np.radians(-el))
ce = np.cos(np.radians(-el))
R_world2obj = np.transpose(np.matrix(((ca * ce, -sa, ca * se),
(sa * ce, ca, sa * se),
(-se, 0, ce))))
# Step 2: Object coordinate to camera coordinate.
R_obj2cam = np.transpose(np.matrix(CAM_ROT))
R_world2cam = R_obj2cam * R_world2obj
cam_location = np.transpose(np.matrix((distance_ratio * CAM_MAX_DIST,
0,
0)))
T_world2cam = -1 * R_obj2cam * cam_location
# Step 3: Fix blender camera's y and z axis direction.
R_camfix = np.matrix(((1, 0, 0), (0, -1, 0), (0, 0, -1)))
R_world2cam = R_camfix * R_world2cam
T_world2cam = R_camfix * T_world2cam
RT = np.hstack((R_world2cam, T_world2cam))
# finally, consider roll
cr = np.cos(np.radians(roll))
sr = np.sin(np.radians(roll))
R_z = np.matrix(((cr, -sr, 0),
(sr, cr, 0),
(0, 0, 1)))
rot_mat = get_rotate_matrix(-np.pi / 2)
return K, R_z@RT@rot_mat | a8ef5852510982851487e349336e41e61f7b582e | 13,787 |
import numpy as np
from scipy.interpolate import interp1d
def pix_to_coord(edges, pix, interp="lin"):
"""Convert pixel coordinates to grid coordinates using the chosen
interpolation scheme."""
scale = interpolation_scale(interp)
interp_fn = interp1d(
np.arange(len(edges), dtype=float), scale(edges), fill_value="extrapolate"
)
return scale.inverse(interp_fn(pix)) | db9fcc47a273e9b39f6d5b6a39b59146866e5dd4 | 13,788 |
def create_action_urls(actions, model=None, **url_args):
"""
Creates a list of URLs for the given actions.
"""
urls = {}
if len(actions) > 0:
# Resolve the url_args values as attributes from the model
values = {}
for arg in url_args:
values[arg] = getattr(model, url_args[arg])
# Generate the URL for every action
for action in actions:
urls[action] = flask.url_for(actions[action], **values)
return urls | f26477e0d046bfe6f73f25b2b086fad3b05a2646 | 13,789 |
import numpy as nm
def check_vfvx(x0, fx, fx_args, dfx, dfx_args=None, delta=1e-5):
"""
Check derivatives of a (vectorized) vector or scalar function of a vector
variable.
"""
if x0.ndim != 2:
raise ValueError('The variable must have two dimensions!')
if dfx_args is None:
dfx_args = fx_args
dfx_a = dfx(x0, *dfx_args)
dfx_d = nm.zeros_like(dfx_a)
for ic in range(x0.shape[1]):
x = x0.copy()
x[:, ic] += delta
f1 = fx(x, *fx_args)
x = x0.copy()
x[:, ic] -= delta
f2 = fx(x, *fx_args)
dfx_d[:, ic] = 0.5 * (f1 - f2) / delta
error = nm.linalg.norm((dfx_a - dfx_d).ravel(), nm.inf)
print('analytical:', dfx_a)
print('difference:', dfx_d)
print('error:', error)
return dfx_a, dfx_d, error | a8cffcbf118394a1ea5d65835bade516035fe9fe | 13,790 |
def add_hovertool(p1, cr_traj, traj_src, sat_src, traj_df):
"""Adds a hovertool to the top panel of the data visualization tool plot."""
# Create the JS callback for vertical line on radar plots.
callback_htool = CustomJS(args={'traj_src':traj_src,'sat_src':sat_src}, code="""
const indices = cb_data.index["1d"].indices[0];
var data_traj = traj_src.data
var t_traj = data_traj['t']
const t_val = t_traj[indices]
var data_sat = sat_src.data;
var t_sat = data_sat['t']
t_sat[0] = t_val
t_sat[1] = t_val
sat_src.change.emit();
""")
# Add the hovertool for the satellite trajectory points on top panel, which are
# linked to the vertical line on the bottom panel.
htool_mode = ('vline' if max(traj_df['y'])-min(traj_df['y'])<=
(max(traj_df['x'])-min(traj_df['x'])) else 'hline')
tooltips1 = [("lat", "@lat"),("lon", "@lon"),('time','@t_str')]
p1.add_tools(HoverTool(renderers=[cr_traj],callback=callback_htool,
mode=htool_mode,tooltips=tooltips1))
return p1 | f45245df7cd81ee8f0fc486d460be0c4338fd921 | 13,791 |
from urllib.parse import urlparse
def backpage_url_to_sitekey(url):
"""http://longisland.backpage.com/FemaleEscorts/s-mny-oo-chics-but-oo-nn-lik-oo-me-19/40317377"""
(scheme, netloc, path, params, query, fragment) = urlparse(url)
sitekey = netloc.split('.')[0]
return sitekey | 6efa6d0bf73ab297144a7c6a35dbba920f77789e | 13,792 |
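A quick usage sketch (assumes urlparse is available, e.g. from urllib.parse; the listing path is made up):

url = "http://longisland.backpage.com/FemaleEscorts/some-listing/40317377"
print(backpage_url_to_sitekey(url))  # -> longisland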
import torch
def batch_eye_like(X: torch.Tensor):
"""Return batch of identity matrices like given batch of matrices `X`."""
return torch.eye(*X.shape[1:], out=torch.empty_like(X))[None, :, :].repeat(X.size(0), 1, 1) | 266ee5639ce303b81e2cb82892e64f37a09695ff | 13,793 |
def cal_occurence(correspoding_text_number_list):
"""
    calculate the number of occurrences of each item in a list
"""
di = dict()
for i in correspoding_text_number_list:
i = str(i)
s = di.get(i, 0)
if s == 0:
di[i] = 1
else:
di[i] = di[i] + 1
return di | aafabc6abdf4bf1df1b8d9e23a4af375df3ac75b | 13,794 |
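A quick usage sketch:

print(cal_occurence([1, 2, 2, 3, 3, 3]))  # {'1': 1, '2': 2, '3': 3}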
def subtract(v: Vector, w: Vector) -> Vector:
"""simple vector subtraction"""
assert len(v) == len(w), 'Vectors need to have the same length'
return [vi - wi for vi, wi in zip(v, w)] | c6f8a9b19e07206a4d2637557c721bd97ad56363 | 13,795 |
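A quick usage sketch:

print(subtract([5.0, 7.0, 9.0], [1.0, 2.0, 3.0]))  # [4.0, 5.0, 6.0]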
import numpy as np
# NOTE: `I` (the input image), `size`, `filter` and `conv_point1d` are expected to be
# defined at module level.
def f1():
"""
Filtering 1D.
"""
# Get center of the filter
c = int((size - 1) / 2)
# Pad the flatten (1D array) image with wrapping
If = np.pad(I.flatten(), (c), 'wrap')
# Initialize the resulting image
Ir = np.zeros(If.shape)
# Apply 1D convulation in the image
for x in range(c, Ir.shape[0] - c):
Ir[x] = conv_point1d(If, filter, x, c)
# Remove padding
Ir = Ir[c:-c]
# Return the resulting image with original shape
return Ir.reshape(I.shape) | cc50f089148cdbaffbbdc7d6d734a066a6b08722 | 13,796 |
from typing import Optional
def _unify_data_and_user_kwargs(
data: 'LayerData',
kwargs: Optional[dict] = None,
layer_type: Optional[str] = None,
    fallback_name: Optional[str] = None,
) -> 'FullLayerData':
"""Merge data returned from plugins with options specified by user.
If ``data == (_data, _meta, _type)``. Then:
- ``kwargs`` will be used to update ``_meta``
- ``layer_type`` will replace ``_type`` and, if provided, ``_meta`` keys
will be pruned to layer_type-appropriate kwargs
- ``fallback_name`` is used if ``not _meta.get('name')``
.. note:
If a user specified both layer_type and additional keyword arguments
to viewer.open(), it is their responsibility to make sure the kwargs
match the layer_type.
Parameters
----------
data : LayerData
1-, 2-, or 3-tuple with (data, meta, layer_type) returned from plugin.
kwargs : dict, optional
User-supplied keyword arguments, to override those in ``meta`` supplied
by plugins.
layer_type : str, optional
A user-supplied layer_type string, to override the ``layer_type``
declared by the plugin.
fallback_name : str, optional
A name for the layer, to override any name in ``meta`` supplied by the
plugin.
Returns
-------
FullLayerData
Fully qualified LayerData tuple with user-provided overrides.
"""
_data, _meta, _type = _normalize_layer_data(data)
if layer_type:
# the user has explicitly requested this be a certain layer type
# strip any kwargs from the plugin that are no longer relevant
_meta = prune_kwargs(_meta, layer_type)
_type = layer_type
if kwargs:
# if user provided kwargs, use to override any meta dict values that
# were returned by the plugin. We only prune kwargs if the user did
# *not* specify the layer_type. This means that if a user specified
# both layer_type and additional keyword arguments to viewer.open(),
# it is their responsibility to make sure the kwargs match the
# layer_type.
_meta.update(prune_kwargs(kwargs, _type) if not layer_type else kwargs)
if not _meta.get('name') and fallback_name:
_meta['name'] = fallback_name
return (_data, _meta, _type) | c0d472ef60bf69d67ef50d435715bfabe11c229e | 13,797 |
def get_sentence_embeddings(data):
"""
data -> list: list of text
"""
features = temb.batch_tokenize(data, tokenizer)
dataset = temb.prepare_dataset(features)
embeddings = temb.compute_embeddings(dataset, model)
return embeddings | cb1badd5cf9a244d7af8b40d219a467d0dff811e | 13,798 |
from django.contrib.auth import get_user_model
def sample_user(phone="+989123456789", full_name="testname"):
""" Create a sample user """
return get_user_model().objects.create_user(phone=phone,
full_name=full_name) | 418b12b4249c4beda4fed36664f2c9eb14f8adc4 | 13,799 |
import ctypes
import numpy as np
def getVanHoveDistances(positions, displacements, L):
    """
    Compute van Hove distances between particles of a system of size `L', with
    `positions' and `displacements'.
Parameters
----------
positions : (*, 2) float array-like
Positions of the particles.
displacements : (*, 2) float array-like
Displacements of the particles.
L : float
Size of the system box.
Returns
-------
distances : (*^2,) float Numpy array
Van Hove distances.
"""
positions = np.array(positions, dtype=np.double)
N = len(positions)
assert positions.shape == (N, 2)
displacements = np.array(displacements, dtype=np.double)
assert displacements.shape == (N, 2)
distances = np.empty((N**2,), dtype=np.double)
_pycpp.getVanHoveDistances.argtypes = [
ctypes.c_int,
ctypes.c_double,
np.ctypeslib.ndpointer(dtype=np.double, ndim=1, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(dtype=np.double, ndim=1, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(dtype=np.double, ndim=1, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(dtype=np.double, ndim=1, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(dtype=np.double, ndim=1, flags='C_CONTIGUOUS')]
_pycpp.getVanHoveDistances(
N,
L,
np.ascontiguousarray(positions[:, 0]),
np.ascontiguousarray(positions[:, 1]),
np.ascontiguousarray(displacements[:, 0]),
np.ascontiguousarray(displacements[:, 1]),
np.ascontiguousarray(distances))
return distances | a66150cfe238b151f098733d4570c438f1c93906 | 13,801 |
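# Usage sketch for getVanHoveDistances above; it only runs where the compiled
# `_pycpp` extension from the surrounding package is importable. The positions
# and displacements here are random, purely for illustration.
N, L_box = 100, 10.0
positions = np.random.uniform(0.0, L_box, (N, 2))
displacements = np.random.normal(0.0, 0.1, (N, 2))
distances = getVanHoveDistances(positions, displacements, L_box)
assert distances.shape == (N**2,)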
from typing import Any
from typing import Union
from typing import List
import matplotlib.pyplot as plt
import pandas as pd
def plot_local_coordinate_system_matplotlib(
    lcs,
    axes: plt.Axes = None,
color: Any = None,
label: str = None,
time: Union[pd.DatetimeIndex, pd.TimedeltaIndex, List[pd.Timestamp]] = None,
time_ref: pd.Timestamp = None,
time_index: int = None,
show_origin: bool = True,
show_trace: bool = True,
show_vectors: bool = True,
) -> plt.Axes:
"""Visualize a `weldx.transformations.LocalCoordinateSystem` using matplotlib.
Parameters
----------
lcs : weldx.transformations.LocalCoordinateSystem
The coordinate system that should be visualized
axes : matplotlib.axes.Axes
The target matplotlib axes. If `None` is provided, a new one will be created
color : Any
An arbitrary color. The data type must be compatible with matplotlib.
label : str
Name of the coordinate system
time : pandas.DatetimeIndex, pandas.TimedeltaIndex, List[pandas.Timestamp], or \
LocalCoordinateSystem
The time steps that should be plotted
time_ref : pandas.Timestamp
A reference timestamp that can be provided if the ``time`` parameter is a
`pandas.TimedeltaIndex`
time_index : int
Index of a specific time step that should be plotted
show_origin : bool
If `True`, the origin of the coordinate system will be highlighted in the
color passed as another parameter
    show_trace : bool
        If `True`, the trace of a time dependent coordinate system will be visualized in
        the color passed as another parameter
    show_vectors : bool
        If `True`, the coordinate axes of the coordinate system are visualized
Returns
-------
matplotlib.axes.Axes :
The axes object that was used as canvas for the plot
"""
if axes is None:
_, axes = plt.subplots(subplot_kw={"projection": "3d", "proj_type": "ortho"})
if lcs.is_time_dependent and time is not None:
lcs = lcs.interp_time(time, time_ref)
if lcs.is_time_dependent and time_index is None:
for i, _ in enumerate(lcs.time):
draw_coordinate_system_matplotlib(
lcs,
axes,
color=color,
label=label,
time_idx=i,
show_origin=show_origin,
show_vectors=show_vectors,
)
label = None
else:
draw_coordinate_system_matplotlib(
lcs,
axes,
color=color,
label=label,
time_idx=time_index,
show_origin=show_origin,
show_vectors=show_vectors,
)
if show_trace and lcs.coordinates.values.ndim > 1:
coords = lcs.coordinates.values
if color is None:
color = "k"
axes.plot(coords[:, 0], coords[:, 1], coords[:, 2], ":", color=color)
return axes | 2df77f0e3343f6ac541ff37991208fb894b44660 | 13,802 |
def saved_searches_list(request):
"""
Renders the saved_searches_list html
"""
args = get_saved_searches_list(request.user)
return render('saved_searches_list.html', args, request) | a2f92c06733113f05501cb242d2ee2bad91917be | 13,803 |
def get_active_loan_by_item_pid(item_pid):
"""Return any active loans for the given item."""
return search_by_pid(
item_pid=item_pid,
filter_states=current_app.config.get(
"CIRCULATION_STATES_LOAN_ACTIVE", []
),
) | 6922336876fddd72ce7655bf2cfee298fdc4a766 | 13,805 |
from typing import Set, TypeVar
X = TypeVar("X")
def _get_szymkiewicz_simpson_coefficient(a: Set[X], b: Set[X]) -> float:
"""Calculate the Szymkiewicz–Simpson coefficient.
.. seealso:: https://en.wikipedia.org/wiki/Overlap_coefficient
"""
if a and b:
return len(a.intersection(b)) / min(len(a), len(b))
return 0.0 | 42d39edf9fa2465605717e0892bcbca05df7799b | 13,806 |
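# Usage sketch for _get_szymkiewicz_simpson_coefficient above: the overlap is
# normalised by the size of the smaller set, and empty input yields 0.0.
assert _get_szymkiewicz_simpson_coefficient({'a', 'b'}, {'a', 'b', 'c'}) == 1.0
assert _get_szymkiewicz_simpson_coefficient({1, 2, 3}, {2, 3, 4, 5}) == 2 / 3
assert _get_szymkiewicz_simpson_coefficient(set(), {1}) == 0.0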
import pandas as pd
from sklearn.model_selection import train_test_split
from zenml.steps import Output  # assumed ZenML import for the Output annotation
def data_splitter(
input: pd.DataFrame,
) -> Output(train=pd.DataFrame, test=pd.DataFrame,):
"""Splits the input dataset into train and test slices."""
train, test = train_test_split(input, test_size=0.1, random_state=13)
return train, test | ddcc28b4430a8901fcde540cf321d4ea43f123d7 | 13,807 |
def reload(module, exclude=('sys', 'os.path', 'builtins', '__main__',
'numpy', 'numpy._globals')):
"""Recursively reload all modules used in the given module. Optionally
takes a list of modules to exclude from reloading. The default exclude
    list contains sys, os.path, builtins, and __main__, to prevent, e.g., resetting
display, exception, and io hooks.
"""
global found_now
for i in exclude:
found_now[i] = 1
try:
with replace_import_hook(deep_import_hook):
return deep_reload_hook(module)
finally:
found_now = {} | c97fd1942dae583ff236ed73d33b53b685cafd32 | 13,808 |
import numpy as np
def get_words_for_board(words, board_size, packing_constant=1.1):
"""Pick a cutoff which is just beyond limit of the board size."""
# Order the words by length. It's easier to pack shorter words, so prioritize them.
# This is SUPER hacky, should have a Word class that handles these representational differences.
words = sorted(words, key=lambda w: len(w.replace(" ", "").replace("-", "")))
cum_len = np.cumsum([len(word.replace(" ", "").replace("-", "")) for word in words])
num_words = None
for word_idx, cum_letters in enumerate(cum_len):
# Try to pack in slightly more letters than would fit on the word without overlaps,
# as governed by the packing constant.
if cum_letters > packing_constant * board_size**2:
num_words = word_idx
break
if not num_words:
raise ValueError(f"Too few semantic neighbor words to pack a {board_size}x{board_size} board.")
return words[:num_words] | e5f74806fa15c1f1fbe78e0ac218d6d808611dfe | 13,809 |
def booleans(key, val):
"""returns ucsc formatted boolean"""
if val in (1, True, "on", "On", "ON"):
val = "on"
else:
val = "off"
return val | f210a2ce6b998e65d2e5934f1318efea0f96c709 | 13,810 |
def merge_param_classes(*cls_list,
merge_positional_params: bool = True) -> type(Params):
"""
Merge multiple Params classes into a single merged params class and return the merged class.
Note that this will not flatten the nested classes.
:param cls_list: A list of Params subclasses or classes to merge into a single
Params class
:param merge_positional_params: Whether or not to merge the positional params in the classes
"""
if len(cls_list) == 1:
return cls_list[0]
class MergedParams(Params):
__doc__ = f'A Combination of {len(cls_list)} Params Classes:\n'
append_params_attributes(MergedParams, *cls_list)
for params_cls in cls_list:
MergedParams.__doc__ += f'\n\t {params_cls.__name__} - {params_cls.__doc__}'
# resolve positional arguments:
if merge_positional_params:
params_to_delete, positional_param = _merge_positional_params(
[(k, v) for k, v in MergedParams.__dict__.items() if not k.startswith('_')])
if positional_param is None and params_to_delete == []:
return MergedParams
setattr(MergedParams, 'positionals', positional_param)
positional_param.__set_name__(MergedParams, 'positionals')
for k in params_to_delete:
delattr(MergedParams, k)
return MergedParams | c42907652f971d7cd6d208017b8faaacacddb5b2 | 13,811 |
import random
import collections
def make_pin_list(eff_cnt):
"""Generates a pin list with an effect pin count given by eff_cnt."""
cards = [1] * eff_cnt
cards.extend([0] * (131 - len(cards)))
random.shuffle(cards)
deck = collections.deque(cards)
pin_list = []
for letters, _ in KEY_WHEEL_DATA:
pins = [c for c in letters if deck.pop()]
pin_list.append(''.join(pins))
return pin_list | 2c15a09928231993f09a373354ee29723463280d | 13,812 |
# `select`, `column_dict` and `Intention` are assumed to be helpers provided by
# the surrounding query-pipeline library.
def drop(cols, stmt):
"""
Function: Drops columns from the statement.
Input: List of columns to drop.
Output: Statement with columns that are not dropped.
"""
col_dict = column_dict(stmt)
col_names = [c for c in col_dict.keys()]
colintention = [c.evaluate(stmt).name if isinstance(c, Intention) else c for c in cols]
new_cols = list(filter(lambda c: c not in colintention, col_names))
undrop = select(new_cols, stmt)
return undrop | 73ecf35077824281a5ebc4e26776b963e0cb378e | 13,813 |
def ConvertVolumeSizeString(volume_size_gb):
"""Converts the volume size defined in the schema to an int."""
volume_sizes = {
"500 GB (128 GB PD SSD x 4)": 500,
"1000 GB (256 GB PD SSD x 4)": 1000,
}
return volume_sizes[volume_size_gb] | b1f90e5ded4d543d88c4f129ea6ac03aeda0c04d | 13,814 |
def render_template_with_system_context(value):
"""
Render provided template with a default system context.
:param value: Template string.
:type value: ``str``
"""
context = {
SYSTEM_KV_PREFIX: KeyValueLookup(),
}
rendered = render_template(value=value, context=context)
return rendered | 6df2e7a652595b35919638791aae5465258edf0f | 13,815 |
def ToTranslation(tree, placeholders):
"""Converts the tree back to a translation, substituting the placeholders
back in as required.
"""
text = tree.ToString()
assert text.count(PLACEHOLDER_STRING) == len(placeholders)
transl = tclib.Translation()
for placeholder in placeholders:
index = text.find(PLACEHOLDER_STRING)
if index > 0:
transl.AppendText(text[:index])
text = text[index + len(PLACEHOLDER_STRING):]
transl.AppendPlaceholder(placeholder)
if text:
transl.AppendText(text)
return transl | 36fca25dfc78e0f37ddc6193a17f2d29c6192228 | 13,816 |
import torch
def complex(real, imag):
"""Return a 'complex' tensor
    - If the `fft` module is present, returns a proper complex tensor
    - Otherwise, stacks the real and imaginary components along the last
    dimension.
Parameters
----------
real : tensor
imag : tensor
Returns
-------
complex : tensor
"""
if _torch_has_complex:
return torch.complex(real, imag)
else:
return torch.stack([real, imag], -1) | 272a293e3918e5e067f251a7dae10a4d2c56abf4 | 13,817 |
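# Usage sketch for the complex() helper above (it shadows the builtin of the same
# name); `_torch_has_complex` is a module-level flag assumed to be set elsewhere.
real = torch.tensor([1.0, 2.0])
imag = torch.tensor([3.0, 4.0])
z = complex(real, imag)  # complex tensor of shape (2,), or a (2, 2) real/imag stack as fallback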
def get_snps(x: str) -> tuple:
"""Parse a SNP line and return name, chromsome, position."""
snp, loc = x.split(' ')
chrom, position = loc.strip('()').split(':')
return snp, chrom, int(position) | 52672c550c914d70033ab45fd582fb9e0f97f023 | 13,818 |
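# Usage sketch for get_snps above, with a hypothetical SNP line:
assert get_snps('rs123 (1:10583)') == ('rs123', '1', 10583)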
def qr_match(event, context, user=None):
"""
Function used to associate a given QR code with the given email
"""
user_coll = coll('users')
result = user_coll.update_one({'email': event["link_email"]}, {'$push': {'qrcode': event["qr_code"]}})
if result.matched_count == 1:
return {"statusCode": 200, "body": "success"}
else:
return {"statusCode": 404, "body": "User not found"} | 7af48bc9fc97d34eb182eb8f429d93396079db87 | 13,819 |
def update_has_started(epoch, settings):
"""
Tells whether update has started or not
:param epoch: epoch number
:param settings: settings dictionary
:return: True if the update has started, False otherwise
"""
return is_baseline_with_update(settings['baseline']) and epoch >= settings['update']['start_epoch'] | d2d2c8d7d8de0a13414a116121fb3cec47bc1d3f | 13,820 |
import torch
def compute_i_th_moment_batches(input, i):
"""
compute the i-th moment for every feature map in the batch
:param input: tensor
:param i: the moment to be computed
:return:
"""
n, c, h, w = input.size()
input = input.view(n, c, -1)
mean = torch.mean(input, dim=2).view(n, c, 1, 1)
eps = 1e-5
var = torch.var(input, dim=2).view(n, c, 1, 1) + eps
std = torch.sqrt(var)
if i == 1:
return mean
elif i == 2:
return std
else:
sol = ((input.view(n, c, h, w) - mean.expand(n, c, h, w)) / std).pow(i)
sol = torch.mean(sol.view(n, c, -1), dim=2).view(n, c, 1, 1)
return sol | 2ab3b7bfd34b482cdf55d5a066b57852182b5b6a | 13,821 |
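# Usage sketch for compute_i_th_moment_batches above: per-channel moments of a
# batch of feature maps, each returned with shape (n, c, 1, 1).
x = torch.randn(2, 3, 8, 8)
mean = compute_i_th_moment_batches(x, 1)
std = compute_i_th_moment_batches(x, 2)
third = compute_i_th_moment_batches(x, 3)   # standardised third moment (skewness-like)
assert mean.shape == std.shape == third.shape == (2, 3, 1, 1)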
def hs_online_check(onion, put_url):
"""Online check for hidden service."""
try:
        print(onion)
        return hs_http_checker(onion, put_url)
    except Exception as error:
        print("Returned nothing.")
        print(error)
return "" | 19b7b2f45581e2bdb907d416be1885f569841a86 | 13,822 |
def plotfile(fname, cols=(0,), plotfuncs=None,
comments='#', skiprows=0, checkrows=5, delimiter=',',
names=None, subplots=True, newfig=True, **kwargs):
"""
Plot the data in a file.
*cols* is a sequence of column identifiers to plot. An identifier
is either an int or a string. If it is an int, it indicates the
column number. If it is a string, it indicates the column header.
matplotlib will make column headers lower case, replace spaces with
underscores, and remove all illegal characters; so ``'Adj Close*'``
will have name ``'adj_close'``.
- If len(*cols*) == 1, only that column will be plotted on the *y* axis.
- If len(*cols*) > 1, the first element will be an identifier for
data for the *x* axis and the remaining elements will be the
column indexes for multiple subplots if *subplots* is *True*
(the default), or for lines in a single subplot if *subplots*
is *False*.
*plotfuncs*, if not *None*, is a dictionary mapping identifier to
an :class:`~matplotlib.axes.Axes` plotting function as a string.
Default is 'plot', other choices are 'semilogy', 'fill', 'bar',
etc. You must use the same type of identifier in the *cols*
vector as you use in the *plotfuncs* dictionary, e.g., integer
column numbers in both or column names in both. If *subplots*
is *False*, then including any function such as 'semilogy'
that changes the axis scaling will set the scaling for all
columns.
- *comments*: the character used to indicate the start of a comment
in the file, or *None* to switch off the removal of comments
- *skiprows*: is the number of rows from the top to skip
- *checkrows*: is the number of rows to check to validate the column
data type. When set to zero all rows are validated.
- *delimiter*: is the character(s) separating row items
- *names*: if not None, is a list of header names. In this case, no
header will be read from the file
If *newfig* is *True*, the plot always will be made in a new figure;
if *False*, it will be made in the current figure if one exists,
else in a new figure.
kwargs are passed on to plotting functions.
Example usage::
# plot the 2nd and 4th column against the 1st in two subplots
plotfile(fname, (0,1,3))
# plot using column names; specify an alternate plot type for volume
plotfile(fname, ('date', 'volume', 'adj_close'),
plotfuncs={'volume': 'semilogy'})
Note: plotfile is intended as a convenience for quickly plotting
data from flat files; it is not intended as an alternative
interface to general plotting with pyplot or matplotlib.
"""
if newfig:
fig = figure()
else:
fig = gcf()
if len(cols) < 1:
raise ValueError('must have at least one column of data')
if plotfuncs is None:
plotfuncs = {}
with cbook._suppress_matplotlib_deprecation_warning():
r = mlab._csv2rec(fname, comments=comments, skiprows=skiprows,
checkrows=checkrows, delimiter=delimiter,
names=names)
def getname_val(identifier):
'return the name and column data for identifier'
if isinstance(identifier, str):
return identifier, r[identifier]
elif isinstance(identifier, Number):
name = r.dtype.names[int(identifier)]
return name, r[name]
else:
raise TypeError('identifier must be a string or integer')
xname, x = getname_val(cols[0])
ynamelist = []
if len(cols) == 1:
ax1 = fig.add_subplot(1, 1, 1)
funcname = plotfuncs.get(cols[0], 'plot')
func = getattr(ax1, funcname)
func(x, **kwargs)
ax1.set_ylabel(xname)
else:
N = len(cols)
for i in range(1, N):
if subplots:
if i == 1:
ax = ax1 = fig.add_subplot(N - 1, 1, i)
else:
ax = fig.add_subplot(N - 1, 1, i, sharex=ax1)
elif i == 1:
ax = fig.add_subplot(1, 1, 1)
yname, y = getname_val(cols[i])
ynamelist.append(yname)
funcname = plotfuncs.get(cols[i], 'plot')
func = getattr(ax, funcname)
func(x, y, **kwargs)
if subplots:
ax.set_ylabel(yname)
if ax.is_last_row():
ax.set_xlabel(xname)
else:
ax.set_xlabel('')
if not subplots:
ax.legend(ynamelist)
if xname == 'date':
fig.autofmt_xdate() | 493fccdf7d3661b9acffd22dbfd5799126a3d4f8 | 13,823 |
def retrieve(object_type, **kwargs):
"""Get objects from the Metatlas object database.
This will automatically select only objects created by the current
user unless `username` is provided. Use `username='*'` to search
against all users.
Parameters
----------
object_type: string
The type of object to search for (i.e. "Groups").
**kwargs
Specific search queries (i.e. name="Sargasso").
Use '%' for wildcard patterns (i.e. description='Hello%').
If you want to match a '%' character, use '%%'.
Returns
-------
objects: list
List of Metatlas Objects meeting the criteria. Will return the
latest version of each object.
"""
workspace = Workspace.get_instance()
out = workspace.retrieve(object_type, **kwargs)
workspace.close_connection()
return out | 54d35c23dd92ad65c5911d8c451b5b1fcbd131da | 13,825 |
def read_plot_pars() :
"""
Parameters are (in this order):
Minimum box width,
Maximum box width,
Box width iterations,
Minimum box length,
Maximum box length,
Box length iterations,
Voltage difference
"""
def extract_parameter_from_string(string):
#returns the part of the string after the ':' sign
parameter = ""
start_index = string.find(':')
for i in range(start_index+1, len(string)-1):
parameter += string[i]
return parameter
f = open("input.txt", "r")
pars = []
line_counter = 0
for line in f:
if ((line_counter > 0) and (line_counter < 8)):
pars.append(extract_parameter_from_string(line))
line_counter += 1
return pars | c78dc8e2a86b20eb6007850a70c038de5bf9f841 | 13,826 |
from rx.core import typing  # assumed: RxPY's typing module, which defines Subscription
def create(subscribe: typing.Subscription) -> Observable:
"""Creates an observable sequence object from the specified
subscription function.
.. marble::
:alt: create
[ create(a) ]
---1---2---3---4---|
Args:
subscribe: Subscription function.
Returns:
An observable sequence that can be subscribed to via the given
subscription function.
"""
return Observable(subscribe) | 79c149545475a7686f8f8dffaed8f343604dd4aa | 13,827 |
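# Usage sketch for create above, assuming the RxPY Observable imported by the
# surrounding module: the subscription function receives an observer and an
# optional scheduler, and pushes values through the observer.
def push_values(observer, scheduler=None):
    for i in (1, 2, 3, 4):
        observer.on_next(i)
    observer.on_completed()
create(push_values).subscribe(on_next=print)   # prints 1, 2, 3, 4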
def tocl(d):
"""Generate TOC, in-page links to the IDs we're going to define below"""
anchors = sorted(d.keys(), key=_lower)
return TemplateData(t='All The Things', e=[a for a in anchors]) | 8c27c42f05e4055a8e195d4d352345acc7821bae | 13,828 |
def get_upper_parentwidget(widget, parent_position: int):
"""This function replaces this:
self.parentWidget().parentWidget().parentWidget()
with this:
get_upper_parentwidget(self, 3)
:param widget: QWidget
:param parent_position: Which parent
:return: Wanted parent widget
"""
while parent_position > 0:
widget = widget.parentWidget()
parent_position -= 1
else:
return widget | ff010f3d9e000cfa3c58160e150c858490f2412d | 13,829 |
def DirectorySizeAsString(directory):
"""Returns size of directory as a string."""
return SizeAsString(DirectorySize(directory)) | 3e3d3b029da40502c2f0e7e5867786d586ad8109 | 13,830 |
def patch_is_tty(value):
""" Wrapped test function will have peltak.core.shell.is_tty set to *value*. """
def decorator(fn): # pylint: disable=missing-docstring
@wraps(fn)
def wrapper(*args, **kw): # pylint: disable=missing-docstring
is_tty = shell.is_tty
shell.is_tty = value
try:
return fn(*args, **kw)
finally:
shell.is_tty = is_tty
return wrapper
return decorator | 77655d32a5572824978910a12378a54d83b7e81e | 13,831 |
import torch
import math
def uniform_unit_scaling(tensor: torch.Tensor, nonlinearity: str = "linear"):
"""
    An initialiser which preserves output variance for approximately Gaussian
distributed inputs. This boils down to initialising layers using a uniform
distribution in the range `(-sqrt(3/dim[0]) * scale, sqrt(3 / dim[0]) * scale)`, where
`dim[0]` is equal to the input dimension of the parameter and the `scale`
is a constant scaling factor which depends on the non-linearity used.
See `Random Walk Initialisation for Training Very Deep Feedforward Networks
<https://www.semanticscholar.org/paper/Random-Walk-Initialization-for-Training-Very-Deep-Sussillo-Abbott/be9728a0728b6acf7a485225b1e41592176eda0b>`_
for more information.
# Parameters
tensor : `torch.Tensor`, required.
The tensor to initialise.
nonlinearity : `str`, optional (default = `"linear"`)
The non-linearity which is performed after the projection that this
tensor is involved in. This must be the name of a function contained
in the `torch.nn.functional` package.
# Returns
The initialised tensor.
"""
size = 1.0
# Estimate the input size. This won't work perfectly,
# but it covers almost all use cases where this initialiser
# would be expected to be useful, i.e in large linear and
# convolutional layers, as the last dimension will almost
# always be the output size.
for dimension in list(tensor.size())[:-1]:
size *= dimension
activation_scaling = torch.nn.init.calculate_gain(nonlinearity, tensor)
max_value = math.sqrt(3 / size) * activation_scaling
return tensor.data.uniform_(-max_value, max_value) | aa14ec45c389c55c141d9bffd6ef370313fdf446 | 13,832 |
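# Usage sketch for uniform_unit_scaling above: initialise a weight tensor in place,
# drawing from U(-sqrt(3/128), sqrt(3/128)) with the default linear gain of 1.
w = torch.empty(128, 64)
uniform_unit_scaling(w, "linear")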
def get_results(elfFile):
"""Converts and returns collected data."""
staticSizes = parseElf(elfFile)
romSize = sum([size for key, size in staticSizes.items() if key.startswith("rom_")])
ramSize = sum([size for key, size in staticSizes.items() if key.startswith("ram_")])
results = {
"rom": romSize,
"rom_rodata": staticSizes["rom_rodata"],
"rom_code": staticSizes["rom_code"],
"rom_misc": staticSizes["rom_misc"],
"ram": ramSize,
"ram_data": staticSizes["ram_data"],
"ram_zdata": staticSizes["ram_zdata"],
}
return results | b60052f702e53655ab1a109ea2bb039e78aabaf5 | 13,833 |
def path_element_to_dict(pb):
"""datastore.entity_pb.Path_Element converter."""
return {
'type': pb.type(),
'id': pb.id(),
'name': pb.name(),
} | 2a4e757dedf6707dc412248f84b377c2f375e70c | 13,834 |
def get_orr_tensor(struct):
""" Gets orientation of all molecules in the struct """
molecule_list = get_molecules(struct)
orr_tensor = np.zeros((len(molecule_list),3,3))
for i,molecule_struct in enumerate(molecule_list):
orr_tensor[i,:,:] = get_molecule_orientation(molecule_struct)
return orr_tensor | faf42cf76168191835d9dd354ae9bc03198829ad | 13,836 |
def make_request_for_quotation(supplier_data=None):
"""
:param supplier_data: List containing supplier data
"""
supplier_data = supplier_data if supplier_data else get_supplier_data()
rfq = frappe.new_doc('Request for Quotation')
rfq.transaction_date = nowdate()
rfq.status = 'Draft'
rfq.company = '_Test Company'
rfq.message_for_supplier = 'Please supply the specified items at the best possible rates.'
for data in supplier_data:
rfq.append('suppliers', data)
rfq.append("items", {
"item_code": "_Test Item",
"description": "_Test Item",
"uom": "_Test UOM",
"qty": 5,
"warehouse": "_Test Warehouse - _TC",
"schedule_date": nowdate()
})
rfq.submit()
return rfq | ee0663231fc0bb06f6f43fa5abecdced048c1458 | 13,837 |
import numpy as np
def mlrPredict(W, data):
"""
mlrObjFunction predicts the label of data given the data and parameter W
of Logistic Regression
Input:
W: the matrix of weight of size (D + 1) x 10. Each column is the weight
vector of a Logistic Regression classifier.
X: the data matrix of size N x D
Output:
label: vector of size N x 1 representing the predicted label of
corresponding feature vector given in data matrix
"""
label = np.zeros((data.shape[0], 1))
row = data.shape[0]
##################
# YOUR CODE HERE #
##################
# HINT: Do not forget to add the bias term to your input data
# Adding biases
biases = np.full((row,1),1)
X = np.concatenate((biases,data), axis=1)
t = np.sum(np.exp(np.dot(X,W)),axis=1)
t = t.reshape(t.shape[0],1)
theta_value = np.exp(np.dot(X,W))/t
label = np.argmax(theta_value,axis=1)
label = label.reshape(row,1)
return label | a37359433b020eb625b37ea57cb15282c4f82c8d | 13,838 |
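# Usage sketch for mlrPredict above, with hypothetical random weights and data:
N, D = 5, 3
W = np.random.randn(D + 1, 10)       # one weight column per class, plus a bias row
data = np.random.randn(N, D)
labels = mlrPredict(W, data)
assert labels.shape == (N, 1)        # predicted class indices in 0..9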
def add(n):
"""Add 1."""
return n + 1 | c62cee4660540ae62b5b73369bdeb56ccb0088d6 | 13,840 |
import numpy as np
def _area(x1, y1, x2, y2, x3, y3):
"""Heron's formula."""
a = np.sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2))
b = np.sqrt(pow(x3 - x2, 2) + pow(y3 - y2, 2))
c = np.sqrt(pow(x1 - x3, 2) + pow(y3 - y1, 2))
s = (a + b + c) / 2
return np.sqrt(s * (s - a) * (s - b) * (s - c)) | 456ffe56a76fbea082939c278b5f0f2ebaf8c395 | 13,842 |
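# Usage sketch for _area above: a 3-4-5 right triangle has area 6.
assert np.isclose(_area(0, 0, 3, 0, 0, 4), 6.0)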