content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---
from typing import Tuple
def _find_clusters(
data,
cluster_range: Tuple[int, int] = None,
metric: str = "silhouette_score",
target=None,
**kwargs,
):
"""Finds the optimal number of clusters for K-Means clustering using the selected metric.
Args:
data: The data.
cluster_range: A tuple of the minimum and maximum cluster
search range. Defaults to (2, 20).
metric: The metric to optimize (from sklearn.metrics).
target: (For supervised clustering) The labels, as a 1-D array.
**kwargs: Keyword arguments to be passed into the K-Means estimator.
Raises:
ValueError: If the min of the cluster range is not less than the max.
Returns:
The fitted KMeans cluster widget for the best-scoring number of clusters.
"""
cluster_range = cluster_range or (2, 20)
if not cluster_range[0] < cluster_range[1]:
raise ValueError(
"cluster_range expected to be (min_cluster, max_cluster), but the min was >= the max"
)
unsupervised_metrics = [
"silhouette_score",
"davies_bouldin_score",
"calinski_harabasz_score",
]
scores = []
widgets = []
for n in range(*cluster_range):
clusterwidget = _fit_kmeans(data, n, **kwargs)
analysis_func = getattr(sklearn.metrics, metric)
if metric in unsupervised_metrics:
score = analysis_func(data, clusterwidget.clusters)
else:
if target is None:
raise ValueError("'target' must be specified for supervised clustering")
score = analysis_func(target, clusterwidget.clusters)
scores.append(score)
widgets.append(clusterwidget)
best_idx = np.argmax(scores)
clusterwidget = widgets[best_idx]
clusterwidget.search = True
clusterwidget.cluster_range = cluster_range
clusterwidget.metric = metric
clusterwidget.scores = scores
if target is not None:
clusterwidget.target = target
return clusterwidget | a73afd74a6401799b6418e45372aee04cf353cb3 | 21,248 |
def _gate_objectives_li_pe(basis_states, gate, H, c_ops):
"""Objectives for two-qubit local-invariants or perfect-entangler
optimizaton"""
if len(basis_states) != 4:
raise ValueError(
"Optimization towards a two-qubit gate requires 4 basis_states"
)
# Bell states as in "Theorem 1" in
# Y. Makhlin, Quantum Inf. Process. 1, 243 (2002)
psi1 = (basis_states[0] + basis_states[3]) / np.sqrt(2)
psi2 = (1j * basis_states[1] + 1j * basis_states[2]) / np.sqrt(2)
psi3 = (basis_states[1] - basis_states[2]) / np.sqrt(2)
psi4 = (1j * basis_states[0] - 1j * basis_states[3]) / np.sqrt(2)
return [
Objective(initial_state=psi, target=gate, H=H, c_ops=c_ops)
for psi in [psi1, psi2, psi3, psi4]
] | 76be659f97396384102706fe0bc101a7d85d6521 | 21,249 |
from typing import Generator
import pkg_resources
def get_pip_package_list(path: str) -> Generator[pkg_resources.Distribution, None, None]:
"""Get the Pip package list of a Python virtual environment.
Must be a path like: /project/venv/lib/python3.9/site-packages
"""
packages = pkg_resources.find_distributions(path)
return packages | 9e73e27c2b50186dedeedd1240c28ef4f4d50e03 | 21,250 |
from OpenGL.GLU import gluGetString, GLU_EXTENSIONS
def hasGLUExtension( specifier ):
"""Given a string specifier, check for extension being available"""
if not AVAILABLE_GLU_EXTENSIONS:
AVAILABLE_GLU_EXTENSIONS[:] = gluGetString( GLU_EXTENSIONS )
return specifier.replace(as_8_bit('.'),as_8_bit('_')) in AVAILABLE_GLU_EXTENSIONS | cf938ec4d0ec16ae96faa10c50ac5b4bc541a062 | 21,251 |
def do_slots_information(parser, token):
"""Calculates some context variables based on displayed slots.
"""
bits = token.contents.split()
len_bits = len(bits)
if len_bits != 1:
raise TemplateSyntaxError(_('%s tag needs no argument') % bits[0])
return SlotsInformationNode() | e52d724abb435c1b8cba68c352977a1d6c1e1c12 | 21,252 |
def get_region_of_interest(img, sx=0.23, sy=0.15, delta=200, return_vertices=False):
"""
:param img: image to extract ROI from
:param sx: X-axis factor for ROI bottom base
:param sy: Y-axis factor for ROI top base
:param delta: ROI top base length
:param return_vertices: whether to return the ROI vertices
:return: ROI (optional: vertices)
"""
assert len(img.shape) == 2
h, w = img.shape
mask = np.zeros(img.shape)
fill_color = 255
vertices = np.array(
[
[0.5 * (w - delta), sy * h],
[0.5 * (w + delta), sy * h],
[(1 - sx) * w, h - 1],
[sx * w, h - 1],
]
)
cv2.fillPoly(mask, np.array([vertices], dtype=np.int32), fill_color)
roi = mask.astype(np.uint8) & img.astype(np.uint8)
if return_vertices:
return roi, vertices
else:
return roi | 932588f34ba9cd7e4e71b35df60cf03f40574fad | 21,253 |
from collections import Counter
import json
def load_search_freq(fp=SEARCH_FREQ_JSON):
"""
Load the search_freq from JSON file
"""
try:
with open(fp, encoding="utf-8") as f:
return Counter(json.load(f))
except FileNotFoundError:
return Counter() | 5d5e1d1106a88379eab43ce1e533a7cbb5da7eb6 | 21,254 |
def _sum_of_squares(a, axis=0):
"""
Square each element of the input array, and return the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis) | 5271d40b096e4f6f47e010bf0974bc77804a3108 | 21,255 |
import logging
def get_preprocess_fn(pp_pipeline, remove_tpu_dtypes=True):
"""Transform an input string into the preprocessing function.
The minilanguage is as follows:
fn1|fn2(arg, arg2,...)|...
And describes the successive application of the various `fn`s to the input,
where each function can optionally have one or more arguments, which are
either positional or key/value, as dictated by the `fn`.
The output preprocessing function expects a dictionary as input. This
dictionary should have a key "image" that corresponds to a 3D tensor
(height x width x channel).
Args:
pp_pipeline: A string describing the pre-processing pipeline.
remove_tpu_dtypes: Whether to remove TPU incompatible types of data.
Returns:
preprocessing function.
Raises:
ValueError: if preprocessing function name is unknown
"""
def _preprocess_fn(data):
"""The preprocessing function that is returned."""
# Validate input
if not isinstance(data, dict):
raise ValueError("Argument `data` must be a dictionary, "
"not %s" % str(type(data)))
# Apply all the individual steps in sequence.
logging.info("Data before pre-processing:\n%s", data)
for fn_name in pp_pipeline.split("|"):
data = eval(fn_name)(data) # pylint: disable=eval-used
if remove_tpu_dtypes:
# Remove data that are TPU-incompatible (e.g. filename of type tf.string).
for key in list(data.keys()):
if data[key].dtype not in TPU_SUPPORTED_DTYPES:
tf.logging.warning(
"Removing key '{}' from data dict because its dtype {} is not in "
" the supported dtypes: {}".format(key, data[key].dtype,
TPU_SUPPORTED_DTYPES))
del data[key]
logging.info("Data after pre-processing:\n%s", data)
return data
return _preprocess_fn | ef3065252b3aa67cebc6a041eba33711e7a17f82 | 21,256 |
def nodeset(v):
"""Convert a value to a nodeset."""
if not nodesetp(v):
raise XPathTypeError("value is not a node-set")
return v | ccaada2ad8610e0b3561663aab8e90665f6c23de | 21,257 |
from tqdm import tqdm
def get_char_embs(char_emb_path, char_emb_size, alphabet_size=1422):
"""Get pretrained character embeddings and a dictionary mapping characters to their IDs.
Skips IDs 0 and 1, since these are reserved for PAD and UNK, respectively.
Input:
char_emb_path: path to glove.840B.{char_embedding_size}d-char.txt. If None, use random initialization.
char_embedding_size: Size of character embeddings
Returns:
char_emb_matrix: Numpy array shape (1426, char_embedding_size) containing char embeddings.
char2id: dict. Maps chars (string) to their IDs (int).
"""
print("Loading char embeddings from file: {}...".format(char_emb_path))
char_emb_matrix = []
char2id = {}
idx = 0
with open(char_emb_path, 'r') as fh:
for line in tqdm(fh, total=alphabet_size):
line = line.lstrip().rstrip().split(" ")
char = line[0]
vector = list(map(float, line[1:]))
if char_emb_size != len(vector):
raise Exception("Expected vector of size {}, but got vector of size {}.".format(char_emb_size, len(vector)))
char_emb_matrix.append(vector)
char2id[char] = idx
idx += 1
char_emb_matrix = np.array(char_emb_matrix, dtype=np.float32)
print("Loaded char embedding matrix with shape {}.".format(char_emb_matrix.shape))
return char_emb_matrix, char2id | d4be3ed7780efb3ca378c18d805ff7c5550d98d7 | 21,258 |
def _get_reverse_complement(seq):
"""
Get the reverse complement of a DNA sequence.
Parameters:
-----------
seq
Returns:
--------
reverse_complement_seq
Notes:
------
(1) No dependencies required. Pure python.
"""
complement_seq = ""
for i in seq:
if i == "C":
complement_seq += "G"
elif i == "G":
complement_seq += "C"
elif i == "A":
complement_seq += "T"
elif i == "T":
complement_seq += "A"
elif i == "N":
complement_seq += "N"
reverse_complement_seq = complement_seq[::-1]
return reverse_complement_seq | 31408767c628ab7b0e6e63867e37f11eb6e19560 | 21,259 |
def wave_reduce_min_all(val):
"""
All threads get the result
"""
res = wave_reduce_min(val)
return broadcast(res, 0) | dfac75ecd9aeb75dc37cbaa7d04ce2a2732b9ce9 | 21,260 |
def predict_class(all_headlines):
"""
Predict whether each headline is negative or positive.
:param all_headlines: all headlines
:return: headlines with predictions
"""
clf, v = load_classifier("SVM")
headlines = []
for h in all_headlines:
headlines.append(h.to_array())
df = pd.DataFrame(headlines)
df.columns = \
[
'headline',
'origin',
'semantic_value',
'pos',
'neg',
'neu',
'published_at'
]
df['headline'] = df['headline'].map(lambda x: strip_punctuation(x))
df['headline'] = df['headline'].map(lambda x: x.lower())
df['headline'] = df['headline'].map(lambda x: filter_stop_words(x))
df['published_at'] = df['published_at'].map(lambda x: to_epoch(x))
df = normalise_column(df, 'published_at')
tr_counts = v.transform(df['headline'])
tr = pd.DataFrame(tr_counts.todense())
df = df.join(tr)  # keep the vectorized text features alongside the metadata
output = clf.predict(df.drop(["headline", "origin"], axis=1)).astype(int)
df['predicted_class'] = output
i = 0
for h in all_headlines:
h.predicted_class = df['predicted_class'].loc[i]
i += 1
return all_headlines | 38839eba678659529b7fe83d6dc09ffd3cf87e48 | 21,261 |
def find_tickets_for_seat_manager(
user_id: UserID, party_id: PartyID
) -> list[DbTicket]:
"""Return the tickets for that party whose respective seats the user
is entitled to manage.
"""
return db.session \
.query(DbTicket) \
.filter(DbTicket.party_id == party_id) \
.filter(DbTicket.revoked == False) \
.filter(
(
(DbTicket.seat_managed_by_id == None) &
(DbTicket.owned_by_id == user_id)
) |
(DbTicket.seat_managed_by_id == user_id)
) \
.options(
db.joinedload(DbTicket.occupied_seat),
) \
.all() | c59af6629a402f3844e01c5dd86553b8e5d33d64 | 21,262 |
import inspect
from collections import Counter
def insert_features_from_iters(dataset_path, insert_features, field_names, **kwargs):
"""Insert features into dataset from iterables.
Args:
dataset_path (str): Path of the dataset.
insert_features (iter of iter): Collection of iterables representing
features.
field_names (iter): Collection of field names to insert. These must
match the order of their attributes in the insert_features items.
**kwargs: Arbitrary keyword arguments. See below.
Keyword Args:
use_edit_session (bool): Flag to perform updates in an edit session.
Default is False.
log_level (str): Level to log the function at. Defaults to 'info'.
Returns:
collections.Counter: Counts of features inserted.
"""
kwargs.setdefault('use_edit_session', False)
log = leveled_logger(LOG, kwargs.get('log_level', 'info'))
log("Start: Insert features into %s from iterables.", dataset_path)
meta = {'dataset': dataset_metadata(dataset_path)}
keys = {'row': tuple(contain(field_names))}
if inspect.isgeneratorfunction(insert_features):
insert_features = insert_features()
session = Editor(meta['dataset']['workspace_path'], kwargs['use_edit_session'])
cursor = arcpy.da.InsertCursor(dataset_path, field_names=keys['row'])
feature_count = Counter()
with session, cursor:
for row in insert_features:
cursor.insertRow(tuple(row))
feature_count['inserted'] += 1
log("%s features inserted.", feature_count['inserted'])
log("End: Insert.")
return feature_count | d6f4547b33a09391188beb96cf408f3148ef643e | 21,263 |
def check_table(conn, table, interconnect):
"""
Searches the given database table for the specified Interconnect.
:param conn: connect instance for database
:param table: name of table you want to check
:param interconnect: name of the Interconnect you are looking for
:return: the first matching row of the SQL query, or None if not found
"""
cur = conn.cursor()
sql_search = "SELECT * \
FROM %s \
WHERE Interconnect='%s'" % (table, interconnect)
found = cur.execute(sql_search).fetchone()
return found | 0888146d5dfe20e7bdfbfe078c58e86fda43d6a5 | 21,264 |
import tarfile
def get_host_config_tar_response(host):
"""
Build the tar.gz attachment response for the GetHostConfig view.
Note: This is re-used to download host config from the admin interface.
:returns: HttpResponseAttachment
"""
filename = '{host}_v{version}.tar.gz'.format(
host=host.path_str(),
version=host.config_version)
# Use the response as file-like object to write the tar
resp = HttpResponseAttachment(filename=filename, content_type='application/gzip')
with closing(tarfile.open(mode='w:gz', fileobj=resp)) as tar:
config_tar.generate_host_config_tar(host, TarWriter(tar))
return resp | 8a968885bb197f781faf65abf100aa40568f6354 | 21,265 |
async def update_product_remove_tag_by_id(
*,
product_id: int,
session: Session = Depends(get_session),
db_product: Product = Depends(get_product_or_404),
db_tag: Tag = Depends(get_tag_or_404),
):
"""
Remove tag from product
"""
existing_product = db_product["db_product"]
existing_tag = db_tag["db_tag"]
try:
existing_product.tags.remove(existing_tag)
session.add(existing_product)
session.commit()
session.refresh(existing_product)
profiling_api(
f"Product:update:remove_tag:by_id:{product_id}",
db_product["start_time"],
db_product["username"],
)
except Exception as message:
logger.error(message)
logger.exception(message)
raise HTTPException(
status_code=404,
detail="Impossible to remove the tag: product or tag not existing",
)
return existing_product | 41893e64fa02f24df26ed39128657218cbc87231 | 21,266 |
def hard_sigmoid(x: tf.Tensor) -> tf.Tensor:
"""Hard sigmoid activation function.
```plot-activation
activations.hard_sigmoid
```
# Arguments
x: Input tensor.
# Returns
Hard sigmoid activation.
"""
return tf.clip_by_value(x+0.5, 0.0, 1.0) | 203a41d52888b42b643df84986c5fbc8967222c6 | 21,268 |
def get_image_as_np_array(filename: str):
"""Returns an image as an numpy array
"""
img = Image.open(filename)
return np.asarray(img) | 8d3cc1c5311e675c6c710cbd7633a66748308e7d | 21,269 |
def unreduced_coboundary(morse_complex, akq, cell_ix):
""" Helper """
return unreduced_cells(akq, morse_complex.get_coboundary(cell_ix)) | c074a9b7df35f961e66e31a88c8a7f95f48912c7 | 21,270 |
from typing import Union
def __align(obj: Union[Trace, EventLog], pt: ProcessTree, max_trace_length: int = 1,
max_process_tree_height: int = 1, parameters=None):
"""
this function approximates alignments for a given event log or trace and a process tree
:param obj: event log or single trace
:param pt: process tree
:param max_trace_length: specifies when the recursive splitting stops based on the trace's length
:param max_process_tree_height: specifies when the recursive splitting stops based on the tree's height
:return:
"""
assert isinstance(pt, ProcessTree)
if isinstance(obj, Trace):
e = EventLog()
e.append(obj)
obj = e
assert isinstance(obj, EventLog)
pt = process_tree_to_binary_process_tree(pt)
pt = EfficientTree(pt)
if parameters is None:
parameters = {}
parameters[Parameters.SUBTREE_ALIGN_CACHE] = {}
return __approximate_alignments_for_log(obj, pt, max_trace_length, max_process_tree_height,
parameters=parameters) | 0f684403bb70a158c463b4babcada115e908ee88 | 21,271 |
import resource
def scanProgramTransfersCount(program, transfersCount=None, address=None, args={}):
"""
Scan pools by active program, sort by transfersCount
"""
return resource.scan(**{**{
'type': 'pool',
'index': 'activeProgram',
'indexValue': program,
'sort': 'transfersCount',
'sortValue': transfersCount,
'keyValue': address,
}, **args}) | b40a0ff2ea62f840a6c2fd858516dc8998aac30b | 21,272 |
from pedal.tifa.commands import get_issues
from pedal.tifa.feedbacks import initialization_problem
def def_use_error(node, report=MAIN_REPORT):
"""
Checks if node is a name and has a def_use_error
Args:
node (str or AstNode or CaitNode): The Name node to look up.
report (Report): The report to attach data to. Defaults to MAIN_REPORT.
Returns:
True if the given name has a def_use_error
"""
if not isinstance(node, str) and node.ast_name != "Name":
raise TypeError
def_use_issues = get_issues(initialization_problem)
if not isinstance(node, str):
node_id = node.id
else:
node_id = node
has_error = False
for issue in def_use_issues:
name = issue.fields['name']
if name == node_id:
has_error = True
break
return has_error | 6e0113c451a2c09fdb84392060b672ffb3bc19d3 | 21,273 |
def get_ref_inst(ref):
"""
If value is part of a port on an instance, return that instance,
otherwise None.
"""
root = ref.root()
if not isinstance(root, InstRef):
return None
return root.inst | 55f1a84131451a2032b7012b00f9336f12fee554 | 21,274 |
def not_found(error):
"""
Renders 404 page
:returns: HTML
:rtype: flask.Response
"""
view_args["title"] = "Not found"
return render_template("404.html", args=view_args), 404 | 8882f171c5e68f3b24a1a7bd57dbd025a4b3a070 | 21,275 |
def xml_escape(x):
"""Paranoid XML escaping suitable for content and attributes."""
res = ''
for i in x:
o = ord(i)
if ((o >= ord('a')) and (o <= ord('z'))) or \
((o >= ord('A')) and (o <= ord('Z'))) or \
((o >= ord('0')) and (o <= ord('9'))) or \
i in ' !#$%()*+,-./:;=?@\^_`{|}~':
res += i
else:
res += '&#%d;' % o
return res | 018dc7d1ca050641b4dd7198e17911b8d17ce5fc | 21,276 |
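A quick illustrative check of the escaping rule above (a sketch only: it assumes the xml_escape definition from this snippet is in scope, and the inputs are made up):
assert xml_escape('a < b & "c"') == 'a &#60; b &#38; &#34;c&#34;'  # <, & and " become numeric character references
assert xml_escape("safe-text_1.0") == "safe-text_1.0"              # whitelisted characters pass through unchanged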
def read_tab(filename):
"""Read information from a TAB file and return a list.
Parameters
----------
filename : str
Full path and name for the tab file.
Returns
-------
list
"""
with open(filename) as my_file:
lines = my_file.readlines()
return lines | 8a6a6b0ec693130da7f036f4673c89f786dfb230 | 21,277 |
def build_model(stage_id, batch_size, real_images, **kwargs):
"""Builds progressive GAN model.
Args:
stage_id: An integer of training stage index.
batch_size: Number of training images in each minibatch.
real_images: A 4D `Tensor` of NHWC format.
**kwargs: A dictionary of
'start_height': An integer of start image height.
'start_width': An integer of start image width.
'scale_base': An integer of resolution multiplier.
'num_resolutions': An integer of number of progressive resolutions.
'stable_stage_num_images': An integer of number of training images in
the stable stage.
'transition_stage_num_images': An integer of number of training images
in the transition stage.
'total_num_images': An integer of total number of training images.
'kernel_size': Convolution kernel size.
'colors': Number of image channels.
'to_rgb_use_tanh_activation': Whether to apply tanh activation when
output rgb.
'fmap_base': Base number of filters.
'fmap_decay': Decay of number of filters.
'fmap_max': Max number of filters.
'latent_vector_size': An integer of latent vector size.
'gradient_penalty_weight': A float of gradient penalty weight for
wasserstein loss.
'gradient_penalty_target': A float of gradient norm target for
wasserstein loss.
'real_score_penalty_weight': A float of additional penalty to keep the
scores from drifting too far from zero.
'adam_beta1': A float of Adam optimizer beta1.
'adam_beta2': A float of Adam optimizer beta2.
'generator_learning_rate': A float of generator learning rate.
'discriminator_learning_rate': A float of discriminator learning rate.
Returns:
An internal object that wraps all information about the model.
"""
kernel_size = kwargs['kernel_size']
colors = kwargs['colors']
resolution_schedule = make_resolution_schedule(**kwargs)
num_blocks, num_images = get_stage_info(stage_id, **kwargs)
current_image_id = tf.train.get_or_create_global_step()
current_image_id_inc_op = current_image_id.assign_add(batch_size)
tf.summary.scalar('current_image_id', current_image_id)
progress = networks.compute_progress(
current_image_id, kwargs['stable_stage_num_images'],
kwargs['transition_stage_num_images'], num_blocks)
tf.summary.scalar('progress', progress)
real_images = networks.blend_images(
real_images, progress, resolution_schedule, num_blocks=num_blocks)
def _num_filters_fn(block_id):
"""Computes number of filters of block `block_id`."""
return networks.num_filters(block_id, kwargs['fmap_base'],
kwargs['fmap_decay'], kwargs['fmap_max'])
def _generator_fn(z):
"""Builds generator network."""
to_rgb_act = tf.tanh if kwargs['to_rgb_use_tanh_activation'] else None
return networks.generator(
z,
progress,
_num_filters_fn,
resolution_schedule,
num_blocks=num_blocks,
kernel_size=kernel_size,
colors=colors,
to_rgb_activation=to_rgb_act)
def _discriminator_fn(x):
"""Builds discriminator network."""
return networks.discriminator(
x,
progress,
_num_filters_fn,
resolution_schedule,
num_blocks=num_blocks,
kernel_size=kernel_size)
########## Define model.
z = make_latent_vectors(batch_size, **kwargs)
gan_model = tfgan.gan_model(
generator_fn=lambda z: _generator_fn(z)[0],
discriminator_fn=lambda x, unused_z: _discriminator_fn(x)[0],
real_data=real_images,
generator_inputs=z)
########## Define loss.
gan_loss = define_loss(gan_model, **kwargs)
########## Define train ops.
gan_train_ops, optimizer_var_list = define_train_ops(gan_model, gan_loss,
**kwargs)
gan_train_ops = gan_train_ops._replace(
global_step_inc_op=current_image_id_inc_op)
########## Generator smoothing.
generator_ema = tf.train.ExponentialMovingAverage(decay=0.999)
gan_train_ops, generator_vars_to_restore = add_generator_smoothing_ops(
generator_ema, gan_model, gan_train_ops)
class Model(object):
pass
model = Model()
model.stage_id = stage_id
model.batch_size = batch_size
model.resolution_schedule = resolution_schedule
model.num_images = num_images
model.num_blocks = num_blocks
model.current_image_id = current_image_id
model.progress = progress
model.num_filters_fn = _num_filters_fn
model.generator_fn = _generator_fn
model.discriminator_fn = _discriminator_fn
model.gan_model = gan_model
model.gan_loss = gan_loss
model.gan_train_ops = gan_train_ops
model.optimizer_var_list = optimizer_var_list
model.generator_ema = generator_ema
model.generator_vars_to_restore = generator_vars_to_restore
return model | d188ef5672e928b1935a97ade3d26614eb700681 | 21,278 |
def int2(c):
""" Parse a string as a binary number """
return int(c, 2) | dd1fb1f4c194e159b227c77c4246136863646707 | 21,279 |
from typing import Any
from typing import Type
from typing import List
from typing import Dict
def from_serializer(
serializer: serializers.Serializer,
api_type: str,
*,
id_field: str = "",
**kwargs: Any,
) -> Type[ResourceObject]:
"""
Generate a schema from a DRF serializer.
:param serializer: The serializer instance.
:param api_type: The JSON API resource type.
:param id_field: The "id" field of the resource.
If left empty, it is either "id" for non-model serializers, or
for model serializers, it is looked up on the model.
:param kwargs: Extra options (like links and transforms) passed to the schema.
:return: The new schema class.
"""
# get_fields() should return them in the order of Meta.fields
serializer_name = type(serializer).__name__
attrs: List[str] = []
rels: List[str] = []
if not id_field:
# If this is a model serializer, we can reach in to the model
# and look for the model's PK.
if isinstance(serializer, serializers.ModelSerializer):
model = serializer.Meta.model
for db_field in model._meta.get_fields():
if getattr(db_field, "primary_key", False):
id_field = db_field.attname
break
if not id_field:
raise ValueError(f"Unable to find primary key from model: {model}")
else:
# Otherwise, just assume it's "id"
id_field = "id"
for field_name, field in serializer.get_fields().items():
if field_name != id_field:
if isinstance(field, serializers.RelatedField):
rels.append(field_name)
else:
attrs.append(field_name)
values: Dict[str, Any] = {
"id": id_field,
"type": api_type,
"attributes": attrs,
"relationships": rels,
}
values.update(**kwargs)
return type(f"{serializer_name}_AutoSchema", (ResourceObject,), values) | 4fb2c0fb83c26d412de5582a8ebfeb4c72ac7add | 21,280 |
def inv_rotate_pixpts(pixpts_rot, angle):
"""
Inverse rotate rotated pixel points to their original positions.
Keyword arguments:
pixpts_rot -- namedtuple of numpy arrays of x,y pixel points rotated
angle -- rotation angle in degrees
Return value:
pixpts -- namedtuple of numpy arrays of pixel x,y points in
original positions
"""
deg2rad = np.pi/180.
angle_rad = angle*deg2rad
xpix_pts = pixpts_rot.x*np.cos(angle_rad) + pixpts_rot.y*np.sin(angle_rad)
ypix_pts = -pixpts_rot.x*np.sin(angle_rad) + pixpts_rot.y*np.cos(angle_rad)
PixPoints = namedtuple('PixPoints', 'x y')
pixpts = PixPoints(xpix_pts, ypix_pts)
return pixpts | 793b148a0c37d321065dc590343de0f4093abcff | 21,281 |
def properties(classes):
"""get all property (p-*, u-*, e-*, dt-*) classnames
"""
return [c.partition("-")[2] for c in classes if c.startswith("p-")
or c.startswith("u-") or c.startswith("e-") or c.startswith("dt-")] | 417562d19043f4b98068ec38cc010061b612fef3 | 21,283 |
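For illustration only (assuming the properties function above is in scope; the class names are hypothetical microformats2 examples), it strips the p-/u-/e-/dt- prefixes and drops everything else:
assert properties(["p-name", "u-url", "e-content", "dt-published", "h-card"]) == ["name", "url", "content", "published"]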
import array
def adapt_p3_histogram(codon_usages, purge_unwanted=True):
"""Returns P3 from each set of codon usage for feeding to hist()."""
return [array([c.positionalGC(purge_unwanted=True)[3] for c in curr])\
for curr in codon_usages] | d5b0b0b387c3a98f584ca82dad79effbb9aa7a31 | 21,284 |
def handle_logout_response(response):
"""
Handles saml2 logout response.
:param response: Saml2 logout response
"""
if len(response) > 1:
# Currently only one source is supported
return HttpResponseServerError("Logout from several sources not supported")
for entityid, logout_info in response.items():
if isinstance(logout_info, tuple):
# logout_info is a tuple containing header information and a HTML message.
binding, http_info = logout_info
if binding == BINDING_HTTP_POST:
# Display content defined in logout response
body = "".join(http_info["data"])
return HttpResponse(body)
elif binding == BINDING_HTTP_REDIRECT:
# Redirect to address defined in logout response
return HttpResponseRedirect(_get_location(http_info))
else:
# Unknown binding
return HttpResponseServerError("Logout binding not supported")
else: # result from logout, should be OK
pass
return HttpResponseServerError("Failed to log out") | 18d8983a3e01905e1c7c6b41b65eb7e9191a4bf5 | 21,285 |
def get_value_beginning_of_year(idx, col, validate=False):
"""
Returns the value of the series given by df[col] on the
first day of the year of the time index 'idx'.
"""
beginning_of_year_idx = date(year=idx.date().year, month=1, day=1)
return get_value(beginning_of_year_idx, col, validate)
from typing import Callable, List
def doc2vec_embedder(corpus: List[str], size: int = 100, window: int = 5) -> Callable[[List[str]], List[float]]:
"""
Given a corpus of texts, returns an embedder function (a callable that maps
texts to vector representations) built on a fine-tuned Doc2Vec model.
ref: https://radimrehurek.com/gensim/models/doc2vec.html
"""
logger.info(f"Training Doc2Vec with: size={size}, window={window}")
tagged_documents = [TaggedDocument(doc.split(), [i]) for i, doc in enumerate(corpus)]
model = Doc2Vec(tagged_documents, vector_size=size, window=window, min_count=3, workers=16)
def embedder(documents: List[str]) -> List[float]:
"""Generates an embedding using a Doc2Vec"""
return scale_vectors([model.infer_vector(doc.split()) for doc in documents])
return embedder | e14eb3c1daca1c24f9ebeaa04f44091cc12b03ff | 21,287 |
def PremIncome(t):
"""Premium income"""
return SizePremium(t) * PolsIF_Beg1(t) | 1673f5a18171989e15bdfd7fa3e814f8732fd732 | 21,288 |
def _setter_name(getter_name):
""" Convert a getter name to a setter name.
"""
return 'set' + getter_name[0].upper() + getter_name[1:] | d4b55afc10c6d79a1432d2a8f3077eb308ab0f76 | 21,289 |
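A minimal usage sketch, assuming _setter_name above is in scope:
assert _setter_name("width") == "setWidth"
assert _setter_name("backgroundColor") == "setBackgroundColor"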
def get_bel_node_by_pathway_name():
"""Get Reactome related eBEL nodes by pathway name."""
pathway_name = request.args.get('pathway_name')
sql = f'''SELECT
@rid.asString() as rid,
namespace,
name,
bel,
reactome_pathways
FROM
protein
WHERE
pure=true AND
"{pathway_name}" in reactome_pathways
'''
return _get_paginated_ebel_query_result(sql) | 930bb79f70c050acaa052d684de389fc2eee9c36 | 21,290 |
def get_model(model_file, log=True):
"""Load a model from the specified model_file."""
model = load_model(model_file)
if log:
print('Model successfully loaded on rank ' + str(hvd.rank()))
return model | ad699c409588652ac98da0f29b2cb25c53216a46 | 21,291 |
def _sample_weight(kappa, dim, num_samples):
"""Rejection sampling scheme for sampling distance from center on
surface of the sphere.
"""
dim = dim - 1 # since S^{n-1}
b = dim / (np.sqrt(4.0 * kappa ** 2 + dim ** 2) + 2 * kappa)
x = (1.0 - b) / (1.0 + b)
c = kappa * x + dim * np.log(1 - x ** 2)
results = []
n = 0
while True:
z = np.random.beta(dim / 2.0, dim / 2.0, size=num_samples)
w = (1.0 - (1.0 + b) * z) / (1.0 - (1.0 - b) * z)
u = np.random.uniform(low=0, high=1, size=num_samples)
mask = kappa * w + dim * np.log(1.0 - x * w) - c >= np.log(u)
results.append(w[mask])
n += sum(mask)
if n >= num_samples:
break
results = np.concatenate(results)[:num_samples]
return results | 5760bfe205468e9d662ad0e8d8afa641fa45db2c | 21,293 |
import torch
def variable_time_collate_fn3(
batch,
args,
device=torch.device("cpu"),
data_type="train",
data_min=None,
data_max=None,
):
"""
Expects a batch of time series data in the form of (record_id, tt, vals, mask, labels) where
- record_id is a patient id
- tt is a 1-dimensional tensor containing T time values of observations.
- vals is a (T, D) tensor containing observed values for D variables.
- mask is a (T, D) tensor containing 1 where values were observed and 0 otherwise.
- labels is a list of labels for the current patient, if labels are available. Otherwise None.
Returns:
data_dict: A dict with keys:
"observed_tp": (M, T) tensor of (normalized) observation times.
"observed_data": (M, T, D) tensor containing the observed values.
"observed_mask": (M, T, D) tensor containing 1 where values were observed and 0 otherwise.
"""
D = batch[0][2].shape[1]
len_tt = [ex[1].size(0) for ex in batch]
maxlen = np.max(len_tt)
enc_combined_tt = torch.zeros([len(batch), maxlen]).to(device)
enc_combined_vals = torch.zeros([len(batch), maxlen, D]).to(device)
enc_combined_mask = torch.zeros([len(batch), maxlen, D]).to(device)
for b, (record_id, tt, vals, mask, labels) in enumerate(batch):
currlen = tt.size(0)
enc_combined_tt[b, :currlen] = tt.to(device)
enc_combined_vals[b, :currlen] = vals.to(device)
enc_combined_mask[b, :currlen] = mask.to(device)
enc_combined_vals, _, _ = utils.normalize_masked_data(
enc_combined_vals, enc_combined_mask, att_min=data_min, att_max=data_max
)
if torch.max(enc_combined_tt) != 0.0:
enc_combined_tt = enc_combined_tt / torch.max(enc_combined_tt)
data_dict = {
"observed_data": enc_combined_vals,
"observed_tp": enc_combined_tt,
"observed_mask": enc_combined_mask,
}
return data_dict | 5158f7ab642ab33100ec5fc1c044e20edd90687c | 21,294 |
import operator
def run_map_reduce(files, mapper, n):
"""Runner to execute a map-reduce reduction of cowrie log files using mapper and files
Args:
files (list of files): The cowrie log files to be used for map-reduce reduction.
mapper (MapReduce): The mapper processing the files using map_func and reduce_func.
n (int): The number of most frequent commands / IPs / etc. to keep from the cowrie log files.
Returns:
result (list): List of map-reduced cowrie log data.
"""
# main work
counts = mapper(files)
counts.sort(key=operator.itemgetter(1))
counts.reverse()
data = split_data_by_events(counts, n)
result = build_json(data)
return result | a46779fa5546c0e414a6dd4921f52c28cc80535e | 21,295 |
from sqlalchemy.sql import select
def metadata_record_dictize(pkg, context):
"""
Based on ckan.lib.dictization.model_dictize.package_dictize
"""
model = context['model']
is_latest_revision = not(context.get('revision_id') or
context.get('revision_date'))
execute = _execute if is_latest_revision else _execute_with_revision
# package
if is_latest_revision:
if isinstance(pkg, model.PackageRevision):
pkg = model.Package.get(pkg.id)
result = pkg
else:
package_rev = model.package_revision_table
q = select([package_rev]).where(package_rev.c.id == pkg.id)
result = execute(q, package_rev, context).first()
if not result:
raise tk.ObjectNotFound
result_dict = d.table_dictize(result, context)
if result_dict.get('title'):
result_dict['title'] = result_dict['title'].strip()
result_dict['display_name'] = result_dict['title'] or result_dict['name'] or result_dict['id']
# extras
if is_latest_revision:
extra = model.package_extra_table
else:
extra = model.extra_revision_table
q = select([extra]).where(extra.c.package_id == pkg.id)
result = execute(q, extra, context)
result_dict['extras'] = ckan_model_dictize.extras_list_dictize(result, context)
return result_dict | f049faf30322d5d4da45e2a424a6977c894db67c | 21,296 |
def colorbar_set_label_parallel(cbar,label_list,hpos=1.2,vpos=-0.3,
ha='left',va='center',
force_position=None,
**kwargs):
"""
This sets the colorbar labels beside the colorbar.
Parameters:
-----------
cbar: the colorbar used to set.
hpos: the left position of labels, used in vertical colorbar.
vpos: the below position of labels, used in horizontal colorbar.
force_position:
1. In case of a tuple, should be the fraction of the first small one
and the number of remaining equal-length sections. Eg., (0.3,12)
2. In case of a np.ndarray or list with values in the unit of axes
fraction, will be directly used to position the texts.
Example:
--------
/homel/ychao/python/script/set_label_parallel_colorbar.py
"""
def get_yloc(first,num):
"""
first is the fraction of the first small downward arrow; num is the
number of remaining equal-length sections on the colorbar.
"""
first_pos = first/2.
second_pos = np.arange(first + 0.5,num,1)
all_pos = np.array([first_pos] + list(second_pos))
return all_pos/(first+num)
cbar.set_ticklabels([])
cbar.ax.tick_params(right='off',left='off')
#get the text position.
yloc=(cbar.values-cbar.boundaries[0])/(cbar.boundaries[-1]-cbar.boundaries[0])
if force_position is not None:
if isinstance(force_position,(tuple)) and len(force_position) == 2:
yloc = get_yloc(*force_position)
elif isinstance(force_position,(np.ndarray,list)):
yloc = force_position
else:
raise ValueError("Cannot understand force_position")
if len(label_list) != len(yloc):
raise ValueError("the lenght of cbar segments and label list are not equal!")
else:
if cbar.orientation == 'vertical':
for label,ypos in zip(label_list,yloc):
cbar.ax.text(hpos,ypos,label,ha=ha,va=va,**kwargs)
elif cbar.orientation == 'horizontal':
for label,ypos in zip(label_list,yloc):
cbar.ax.text(ypos,vpos,label,ha=ha,va=va,**kwargs) | 811358f254b05d7fa243c96d91c94ed3cb1d1fcd | 21,298 |
def read_csv(file, tz):
"""
Reads the file into a pandas dataframe, cleans data and rename columns
:param file: file to be read
:param tz: timezone
:return: pandas dataframe
"""
ctc_columns = {1: 'unknown_1',
2: 'Tank upper', # temperature [deg C]
3: 'unknown_3',
4: 'Tank lower', # temperature [deg C]
5: 'unknown_5',
6: 'unknown_6',
7: 'Primary flow 1', # temperature [deg C]
8: 'Return flow', # temperature [deg C]
9: 'unknown_9',
10: 'Heater', # electric power [kW]
11: 'L1', # electric current [A]
12: 'L2', # electric current [A]
13: 'L3', # electric current [A]
14: 'unknown_14',
15: 'unknown_15',
16: 'unknown_16',
17: 'unknown_17',
18: 'unknown_18',
19: 'unknown_19',
20: 'unknown_20',
21: 'Charge pump', # speed [%]
22: 'unknown_22',
23: 'Heat pump flow', # temperature [deg C]
24: 'Heat pump return', # temperature [deg C]
25: 'unknown_25',
26: 'unknown_26',
27: 'unknown_27',
28: 'unknown_28',
29: 'unknown_29',
30: 'unknown_30',
31: 'unknown_31',
32: 'Compressor L1', # electric current [A]
33: 'Compressor' # on/off [-]
}
df = pd.read_csv(file, header=None, index_col=0, parse_dates=True, usecols=[i for i in range(34)])
df.index = df.index.tz_localize(tz, ambiguous='NaT')
df = df.loc[df.index.notnull()]
df = df.loc[~df.index.duplicated(keep='first')]
df.rename(columns=ctc_columns, inplace=True)
df['Compressor'] = np.where(df['Compressor'] == 'ON', 1, 0)
return df | 9e9ed864dcba6878562ae8686dab1d1f2650f5b3 | 21,299 |
def get_tokenizer_from_saved_model(saved_model: SavedModel) -> SentencepieceTokenizer:
"""
Get tokenizer from tf SavedModel.
:param SavedModel saved_model: tf SavedModel.
:return: tokenizer.
:rtype: SentencepieceTokenizer
"""
# extract functions that contain SentencePiece somewhere in there
functions_with_sp = [
f
for f in saved_model.meta_graphs[0].graph_def.library.function
if "sentencepiecetokenizeop" in str(f).lower()
]
assert len(functions_with_sp) == 1
# find SentencePieceOp (contains the model) in the found function
nodes_with_sp = [
n for n in functions_with_sp[0].node_def if n.op == "SentencepieceOp"
]
assert len(nodes_with_sp) == 1
# we can pretty much save the model into a file since it does not change
model = nodes_with_sp[0].attr["model"].s
# instantiate the model
tokenizer = SentencepieceTokenizer(model)
return tokenizer | 6b524f9f14e286aa6ef43fe77773f9ec6503cf75 | 21,300 |
import heapq
def heapq_merge(*iters, **kwargs):
"""Drop-in replacement for heapq.merge with key support"""
if kwargs.get('key') is None:
return heapq.merge(*iters)
def wrap(x, key=kwargs.get('key')):
return key(x), x
def unwrap(x):
_, value = x
return value
iters = tuple((wrap(x) for x in it) for it in iters)
return (unwrap(x) for x in heapq.merge(*iters)) | 0693f667fb6b495680066488347d9894e84f6f0a | 21,301 |
def parse_archive_links(html):
"""Parse the HTML of an archive links page."""
parser = _ArchiveLinkHTMLParser()
parser.feed(html)
return parser.archive_links | 7894052d602cbe0db195b6fb9a9c1252163d5266 | 21,303 |
import json
def processing_requests():
"""
Handles the request for what is in processing.
:return: JSON
"""
global processing
global processing_mutex
rc = []
response.content_type = "application/json"
with processing_mutex:
if processing:
rc.append(processing)
return json.dumps(rc) | 76334b997efb659fb9d7502ec14357e8e6660293 | 21,304 |
def detect_feature(a, b=None):
"""
Detect the feature used in a relay program.
Parameters
----------
a : Union[tvm.relay.Expr, tvm.IRModule]
The input expression or module.
b : Optional[Union[tvm.relay.Expr, tvm.IRModule]]
The input expression or module.
The two arguments cannot both be expression or module.
Returns
-------
features : Set[Feature]
Features used in the program.
"""
if isinstance(a, IRModule):
a, b = b, a
return {Feature(int(x)) for x in _ffi_api.detect_feature(a, b)} | 2b9bf11d9b37da7b4473a6da83867911b22586ec | 21,305 |
def get_urls_from_loaded_sitemapindex(sitemapindex):
"""Get all the webpage urls in a retrieved sitemap index XML"""
urls = set()
# for loc_elem in sitemapindex_elem.findall('/sitemap/loc'):
for loc_elem in sitemapindex.findall('//{http://www.sitemaps.org/schemas/sitemap/0.9}loc'):
urls.update(get_urls_from_sitemap(loc_elem.text))
for loc_elem in sitemapindex.findall('//loc'):
urls.update(get_urls_from_sitemap(loc_elem.text))
return urls | 1a94166272385768929e1db70b643293e7c325b5 | 21,306 |
def genLinesegsnp(verts, colors = [], thickness = 2.0):
"""
Generate a NodePath of line segments connecting the given vertices.
:param verts: sequence of 3-D vertex coordinates
:param colors: optional RGBA color [r, g, b, a]; defaults to dark gray
:param thickness: line thickness
:return: NodePath containing the line segments
"""
segs = LineSegs()
segs.setThickness(thickness)
if len(colors) == 0:
segs.setColor(Vec4(.2, .2, .2, 1))
else:
segs.setColor(colors[0], colors[1], colors[2], colors[3])
for i in range(len(verts)-1):
segs.moveTo(verts[i][0], verts[i][1], verts[i][2])
segs.drawTo(verts[i+1][0], verts[i+1][1], verts[i+1][2])
objmnp = NodePath('linesegs')
objmnp.attachNewNode(segs.create())
objmnp.setTransparency(TransparencyAttrib.MAlpha)
return objmnp | 71fc5c936fbe5dfdc528fc14fd6c0dd10d15ff3c | 21,307 |
def enhance_puncta(img, level=7):
"""
Removing low frequency wavelet signals to enhance puncta.
Dependent on image size, try level 6~8.
"""
if level == 0:
return img
wp = pywt.WaveletPacket2D(data=img, wavelet='haar', mode='sym')
back = resize(np.array(wp['d'*level].data), img.shape, order=3, mode='reflect')/(2**level)
cimg = img - back
cimg[cimg < 0] = 0
return cimg | 7c05531bd85dd42296871f884a04cd30c187346e | 21,309 |
def thumbnail(img, size = (1000,1000)):
"""Converts Pillow images to a different size without modifying the original image
"""
img_thumbnail = img.copy()
img_thumbnail.thumbnail(size)
return img_thumbnail | 4eb49869a53d9ddd42ca8c184a12f0fedb8586a5 | 21,310 |
def calculate_new_ratings(P1, P2, winner, type):
"""
calculate and return the new rating/rating_deviation for both songs
Args:
P1 (tuple or float): rating data for song 1
P2 (tuple or float): rating data for song 2
winner (str): left or right
type (str): elo or glicko
Returns:
tuple: newly calculated ratings, rating_deviations
"""
s1, s2 = None, None
if winner == 'left':
s1, s2 = 1, 0
elif winner == 'right':
s1, s2 = 0, 1
if type == 'elo':
return calculate_elo(P1, P2, s1), calculate_elo(P2, P1, s2)
elif type == 'glicko':
return calculate_glicko_rating(P1, P2, s1), calculate_glicko_rating(P2, P1, s2) | 23853c6fd4d6a977e0c0f28b5665baebcab3ae86 | 21,311 |
def age(a):
"""age in yr - age(scale factor)"""
return _cosmocalc.age(a) | 7f4cb143c1b5e56f3f7b1ebc0a916a371070740d | 21,312 |
def _read_array(raster, band, bounds):
""" Read array from raster
"""
if bounds is None:
return raster._gdal_dataset.ReadAsArray()
else:
x_min, y_min, x_max, y_max = bounds
forward_transform = affine.Affine.from_gdal(*raster.geo_transform)
reverse_transform = ~forward_transform
px_min, py_max = reverse_transform * (x_min, y_min)
px_max, py_min = reverse_transform * (x_max, y_max)
x_size = int(px_max - px_min) + 1
y_size = int(py_max - py_min) + 1
if band is not None:
return raster._gdal_dataset.GetRasterBand(band).ReadAsArray(int(px_min),
int(py_min),
x_size,
y_size)
else:
return raster._gdal_dataset.ReadAsArray(int(px_min),
int(py_min),
x_size,
y_size) | 12ad55500950d89bdc84ab29157de9faac17e76a | 21,314 |
def makePlayerInfo(pl_name):
""" Recupere toutes les infos d'un player
:param arg1: nom du joueur
:type arg1: chaine de caracteres
:return: infos du player : budget, profit & ventes (depuis le debut de la partie), boissons a vendre ce jour
:rtype: Json
"""
info = calculeMoneyInfo(pl_name, 0)
drinkInfo = makeDrinkOffered(pl_name)
return ({ "cash" : info['cash'], "profit" : info['profit'], "sales" : info['sales'], "drinksOffered" : drinkInfo }) | 4ebc7f11397091fa3d0c62db7fcfd82720eac530 | 21,315 |
def _FinalizeHeaders(found_fields, headers, flags):
"""Helper to organize the final headers that show in the report.
The fields discovered in the user objects are kept separate from those
created in the flattening process in order to allow checking the found
fields against a list of those expected. Unexpected fields are identified.
If the report is a subset of all fields, the headers are trimmed.
Args:
found_fields: A set of the fields found in all the user objects.
headers: A set of the fields created in the flattening helpers.
Will return with the complete set of fields to be printed.
flags: Argparse flags object with csv_fields.
Returns:
Sorted list of headers.
"""
# Track known fields to notify user if/when fields change. A few are known
# but not printed (they are denormalized and replaced below):
expected_fields = set(_UserDictionaryParser.GetExpectedUserFields())
if found_fields > expected_fields:
unexpected_fields = ', '.join(found_fields - expected_fields)
log_utils.LogWarning(
'Unexpected user fields noticed: %s.' % unexpected_fields)
headers |= found_fields
headers -= set(['emails', 'name', 'nonEditableAliases'])
# Prune the headers reference object that is used outside this
# function by using discard() if a subset of fields is desired.
if flags.csv_fields:
extra_csv_fields = set(flags.csv_fields) - headers
if extra_csv_fields:
print('** Ignoring unknown csv_fields: %s.' % ', '.join(
sorted(extra_csv_fields)))
for field in list(headers):
if field not in flags.csv_fields:
headers.discard(field)
return sorted(headers) | 9d44f10c4890ca48cc00f79b24e0019e346028d0 | 21,316 |
def get_outmost_points(contours):
"""Get the bounding rectangle of all the contours"""
all_points = np.concatenate(contours)
return get_bounding_rect(all_points) | 173631e3397226459d0bf3a91157d2e74660e506 | 21,318 |
def dhcp_release_packet(eth_dst='ff:ff:ff:ff:ff:ff',
eth_src='00:01:02:03:04:05',
ip_src='0.0.0.0',
ip_dst='255.255.255.255',
src_port=68,
dst_port=67,
bootp_chaddr='00:01:02:03:04:05',
bootp_ciaddr='1.2.3.4',
dhcp_server_ip='1.2.3.4'):
"""
Return a dhcp release packet
Supports a few parameters:
@param eth_dst Destination MAC, should be broadcast address
@param eth_src Source MAC, should be address of client
@param ip_src Source IP, should be default route IP address
@param ip_dst Destination IP, broadcast IP address
@param src_port Source Port, 68 for DHCP client
@param dst_port Destination Port, 67 for DHCP Server
@param bootp_chaddr MAC Address of client
@param bootp_ciaddr Client IP Address
@param dhcp_server_ip IP address of DHCP server
"""
pkt = scapy.Ether(dst=eth_dst, src=eth_src)/ \
scapy.IP(src=ip_src, dst=ip_dst)/ \
scapy.UDP(sport=src_port, dport=dst_port)/ \
scapy.BOOTP(chaddr=bootp_chaddr, ciaddr=bootp_ciaddr)/ \
scapy.DHCP(options=[('message-type', 'release'), ('server_id', dhcp_server_ip), ('end')])
return pkt | 63885cb982fbea5f5ff45c850b1bbf00e1154004 | 21,319 |
from typing import Optional
import datetime
def dcfc_30_e_plus_360(start: Date, asof: Date, end: Date, freq: Optional[Decimal] = None) -> Decimal:
"""
Computes the day count fraction for the "30E+/360" convention.
:param start: The start date of the period.
:param asof: The date which the day count fraction to be calculated as of.
:param end: The end date of the period (a.k.a. termination date).
:return: Day count fraction.
>>> ex1_start, ex1_asof = datetime.date(2007, 12, 28), datetime.date(2008, 2, 28)
>>> ex2_start, ex2_asof = datetime.date(2007, 12, 28), datetime.date(2008, 2, 29)
>>> ex3_start, ex3_asof = datetime.date(2007, 10, 31), datetime.date(2008, 11, 30)
>>> ex4_start, ex4_asof = datetime.date(2008, 2, 1), datetime.date(2009, 5, 31)
>>> round(dcfc_30_e_plus_360(start=ex1_start, asof=ex1_asof, end=ex1_asof), 14)
Decimal('0.16666666666667')
>>> round(dcfc_30_e_plus_360(start=ex2_start, asof=ex2_asof, end=ex2_asof), 14)
Decimal('0.16944444444444')
>>> round(dcfc_30_e_plus_360(start=ex3_start, asof=ex3_asof, end=ex3_asof), 14)
Decimal('1.08333333333333')
>>> round(dcfc_30_e_plus_360(start=ex4_start, asof=ex4_asof, end=ex4_asof), 14)
Decimal('1.33333333333333')
"""
## Get the new start date, if required:
if start.day == 31:
start = datetime.date(start.year, start.month, 30)
## Get the new asof date, if required:
if asof.day == 31:
asof = asof + datetime.timedelta(days=1)
## Compute number of days:
nod = (asof.day - start.day) + 30 * (asof.month - start.month) + 360 * (asof.year - start.year)
## Done, compute and return the day count fraction:
return nod / Decimal(360) | 99cc53d69eb1151056475967459be072cff4f773 | 21,321 |
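As a worked check of the 30E+/360 arithmetic above, using the second doctest's dates (start 2007-12-28, as-of 2008-02-29; neither day is 31, so no adjustment applies):
nod = (29 - 28) + 30 * (2 - 12) + 360 * (2008 - 2007)
assert nod == 61   # 61 / 360 = 0.169444..., matching Decimal('0.16944444444444')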
def get_current_func_info_by_traceback(self=None, logger=None) -> None:
"""
Get the executing function's call info via traceback and print it
use eg:
class A:
def a(self):
def cc():
def dd():
get_current_func_info_by_traceback(self=self)
dd()
cc()
def b():
get_current_func_info_by_traceback()
aa = A()
aa.a()
b()
# -> A.a.cc.dd in line_num: 131 invoked
# -> <module>.b in line_num: 136 invoked
:param self: the calling class instance's self
:param logger:
:return:
"""
try:
extract_stack_info = extract_stack()
# pprint(extract_stack_info)
# combined str of invoked function names, excluding the class name
detail_func_invoked_info = ''
for item in extract_stack_info[1:-1]:
# extract_stack_info[1:-1] excludes get_current_func_info_by_traceback itself
tmp_str = '{}' if detail_func_invoked_info == '' else '.{}'
detail_func_invoked_info += tmp_str.format(item[2])
# print(detail_func_invoked_info)
# func_name = extract_stack_info[-2][2],
line_num = extract_stack_info[-2][1]
_print(msg='-> {}.{} in line_num: {} invoked'.format(
# class name
extract_stack_info[0][2] if self is None else self.__class__.__name__,
detail_func_invoked_info,
line_num,),
logger=logger,
log_level=1,)
except Exception as e:
_print(msg='Encountered an error:', logger=logger, exception=e, log_level=2)
return None | c89496eb7303acb91ef64587d10d5b7350e9a00e | 21,322 |
def augment_timeseries_shift(x: tf.Tensor, max_shift: int = 10) -> tf.Tensor:
"""Randomly shift the time series.
Parameters
----------
x : tf.Tensor (T, ...)
The tensor to be augmented.
max_shift : int
The maximum shift to be randomly applied to the tensor.
Returns
-------
x : tf.Tensor
The augmented tensor.
"""
# shift the data by removing a random number of later time points
dt = tf.random.uniform(shape=[], minval=1, maxval=max_shift, dtype=tf.int32)  # minval=1 so the slice below never drops the whole series
return x[:-dt, ...] | 2a9265ea72478d9c860f549637ea629e4b86f4f0 | 21,323 |
def endpoint(fun):
"""Decorator to denote a method which returns some result to the user"""
if not hasattr(fun, '_zweb_post'):
fun._zweb_post = []
fun._zweb = _LEAF_METHOD
fun._zweb_sig = _compile_signature(fun, partial=False)
return fun | 8050a6d1c6e23c1feeec4744edd45b7ae589aab8 | 21,324 |
import torch
def focal_prob(attn, batch_size, queryL, sourceL):
"""
consider the confidence g(x) for each fragment as the sqrt
of their similarity probability to the query fragment
sigma_{j} (xi - xj)gj = sigma_{j} xi*gj - sigma_{j} xj*gj
attn: (batch, queryL, sourceL)
"""
# -> (batch, queryL, sourceL, 1)
xi = attn.unsqueeze(-1).contiguous()
# -> (batch, queryL, 1, sourceL)
xj = attn.unsqueeze(2).contiguous()
# -> (batch, queryL, 1, sourceL)
xj_confi = torch.sqrt(xj)
xi = xi.view(batch_size*queryL, sourceL, 1)
xj = xj.view(batch_size*queryL, 1, sourceL)
xj_confi = xj_confi.view(batch_size*queryL, 1, sourceL)
# -> (batch*queryL, sourceL, sourceL)
term1 = torch.bmm(xi, xj_confi)
term2 = xj * xj_confi
funcF = torch.sum(term1-term2, dim=-1) # -> (batch*queryL, sourceL)
funcF = funcF.view(batch_size, queryL, sourceL)
fattn = torch.where(funcF > 0, torch.ones_like(attn),
torch.zeros_like(attn))
return fattn | 968baad0fa6f78b49eeca1056556a6c2ff3a9cef | 21,325 |
def get_fibonacci_iterative(n: int) -> int:
"""
Calculate the fibonacci number at position 'n' in an iterative way
:param n: position number
:return: position n of Fibonacci series
"""
a = 0
b = 1
for i in range(n):
a, b = b, a + b
return a | 0ece23b00d810ce1c67cf5434cf26e1e21685c20 | 21,326 |
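A small sanity check, assuming get_fibonacci_iterative above is in scope:
assert [get_fibonacci_iterative(n) for n in range(8)] == [0, 1, 1, 2, 3, 5, 8, 13]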
def get_sample_content(filename):
"""Return sample content form file."""
with open(
"tests/xml/{filename}".format(
filename=filename), encoding="utf-8") as file:
return file.read() | 2ba60ad6473ec53f6488b42ceb7090b0f7c8f985 | 21,327 |
def create_contrasts(task):
"""
Create a contrasts list
"""
contrasts = []
contrasts += [('Go', 'T', ['GO'], [1])]
contrasts += [('GoRT', 'T', ['GO_rt'], [1])]
contrasts += [('StopSuccess', 'T', ['STOP_SUCCESS'], [1])]
contrasts += [('StopUnsuccess', 'T', ['STOP_UNSUCCESS'], [1])]
contrasts += [('StopUnsuccessRT', 'T', ['STOP_UNSUCCESS_rt'], [1])]
contrasts += [('Go-StopSuccess', 'T', ['GO', 'STOP_SUCCESS'], [1, -1])]
contrasts += [('Go-StopUnsuccess', 'T', ['GO', 'STOP_UNSUCCESS'], [1, -1])]
contrasts += [('StopSuccess-StopUnsuccess', 'T',
['STOP_SUCCESS', 'STOP_UNSUCCESS'], [1, -1])]
# add negative
repl_w_neg = []
for con in contrasts:
if '-' not in con[0]:
newname = 'neg_%s' % con[0]
else:
newname = "-".join(con[0].split("-")[::-1])
new = (newname, 'T', con[2], [-x for x in con[3]])
repl_w_neg.append(con)
repl_w_neg.append(new)
return repl_w_neg | 221b1b1ebcc6c8d0e2fcb32d004794d1b0a47522 | 21,328 |
def project(raster_path, boxes):
"""Project boxes into utm"""
with rasterio.open(raster_path) as dataset:
bounds = dataset.bounds
pixelSizeX, pixelSizeY = dataset.res
#subtract origin. Recall that numpy origin is top left! Not bottom left.
boxes["left"] = (boxes["xmin"] * pixelSizeX) + bounds.left
boxes["right"] = (boxes["xmax"] * pixelSizeX) + bounds.left
boxes["top"] = bounds.top - (boxes["ymin"] * pixelSizeY)
boxes["bottom"] = bounds.top - (boxes["ymax"] * pixelSizeY)
# combine column to a shapely Box() object, save shapefile
boxes['geometry'] = boxes.apply(
lambda x: shapely.geometry.box(x.left, x.top, x.right, x.bottom), axis=1)
boxes = geopandas.GeoDataFrame(boxes, geometry='geometry')
#set projection, (see dataset.crs) hard coded here
boxes.crs = {'init': "{}".format(dataset.crs)}
#Select columns
boxes = boxes[["left", "bottom", "right", "top", "score", "label", "geometry"]]
return boxes | 92e7bc01492b3370767ac56b18b2f937caafc6c3 | 21,329 |
def mean_relative_error(preds: Tensor, target: Tensor) -> Tensor:
"""
Computes mean relative error
Args:
preds: estimated labels
target: ground truth labels
Return:
Tensor with mean relative error
Example:
>>> from torchmetrics.functional import mean_relative_error
>>> x = torch.tensor([0., 1, 2, 3])
>>> y = torch.tensor([0., 1, 2, 2])
>>> mean_relative_error(x, y)
tensor(0.1250)
.. deprecated:: v0.4
Use :func:`torchmetrics.functional.mean_absolute_percentage_error`. Will be removed in v0.5.
"""
warn(
"Function `mean_relative_error` was deprecated v0.4 and will be removed in v0.5."
"Use `mean_absolute_percentage_error` instead.", DeprecationWarning
)
sum_rltv_error, n_obs = _mean_absolute_percentage_error_update(preds, target)
return _mean_absolute_percentage_error_compute(sum_rltv_error, n_obs) | 23c7efe3a91179c670383b1687583dc903052a54 | 21,330 |
from typing import Dict
from typing import Any
def render_dendrogram(dend: Dict["str", Any], plot_width: int, plot_height: int) -> Figure:
"""
Render a missing dendrogram.
"""
# list of lists of dcoords and icoords from scipy.dendrogram
xs, ys, cols = dend["icoord"], dend["dcoord"], dend["ivl"]
# if the number of columns is greater than 20, make the plot wider
if len(cols) > 20:
plot_width = 28 * len(cols)
fig = Figure(
plot_width=plot_width,
plot_height=plot_height,
toolbar_location=None,
tools="",
)
# round the coordinates to integers, and plot the dendrogram
xs = [[round(coord) for coord in coords] for coords in xs]
ys = [[round(coord, 2) for coord in coords] for coords in ys]
fig.multi_line(xs=xs, ys=ys, line_color="#8073ac")
# extract the horizontal lines for the hover tooltip
h_lns_x = [coords[1:3] for coords in xs]
h_lns_y = [coords[1:3] for coords in ys]
null_mismatch_vals = [coord[0] for coord in h_lns_y]
source = ColumnDataSource(dict(x=h_lns_x, y=h_lns_y, n=null_mismatch_vals))
h_lns = fig.multi_line(xs="x", ys="y", source=source, line_color="#8073ac")
hover_pts = HoverTool(
renderers=[h_lns],
tooltips=[("Average distance", "@n{0.1f}")],
line_policy="interp",
)
fig.add_tools(hover_pts)
# shorten column labels if necessary, and override coordinates with column names
cols = [f"{col[:16]}..." if len(col) > 18 else col for col in cols]
axis_coords = list(range(5, 10 * len(cols) + 1, 10))
axis_overrides = dict(zip(axis_coords, cols))
fig.xaxis.ticker = axis_coords
fig.xaxis.major_label_overrides = axis_overrides
fig.xaxis.major_label_orientation = np.pi / 3
fig.yaxis.axis_label = "Average Distance Between Clusters"
fig.grid.visible = False
return fig | 1dc61a5ddffc85e6baa9bfbb28620a3039dc8993 | 21,331 |
from typing import List
import logging
def sort_by_fullname(data: List[dict]) -> List[dict]:
""" sort data by full name
:param data:
:return:
"""
logging.info("Sorting data by fullname...")
try:
data.sort(key=lambda info: info["FULL_NAME"], reverse=False)
except Exception as exception:
logging.exception(exception)
raise
logging.info("Sort data by fullname successfully!")
return data | 0b4ecf53893bda7d226b3c26fe51b9abc073294b | 21,332 |
def get_vrf_interface(device, vrf):
""" Gets the subinterfaces for vrf
Args:
device ('obj'): device to run on
vrf ('str'): vrf to search under
Returns:
interfaces('list'): List of interfaces under specified vrf
None
Raises:
None
"""
log.info("Getting the interfaces under vrf {vrf}".format(vrf=vrf))
try:
out = device.parse("show vrf {vrf}".format(vrf=vrf))
except SchemaEmptyParserError:
return None
if out and "vrf" in out and vrf in out["vrf"]:
return out["vrf"][vrf].get("interfaces", None) | 57dedbd148f208038bd523c1901827ac7eca8754 | 21,333 |
def rsptext(rsp,subcode1=0,subcode2=0,erri='',cmd='',subcmd1='',subcmd2=''):
""" Adabas response code to text conversion """
global rspplugins
if rsp in rspplugins:
plugin = rspplugins[rsp] # get the plugin function
return plugin(rsp, subcode1=subcode1, subcode2=subcode2,
cmd=cmd,subcmd1=subcmd1,subcmd2=subcmd2)
c1=chr(subcode1 & 0xff)
c2=chr( (subcode1 >> 8)& 0xff)
c3=chr(subcode2 & 0xff)
c4=chr( (subcode2 >> 8)& 0xff)
if subcode2 == 0:
if subcode1>>16:
c1=chr( (subcode1 >> 24)& 0xff)
c2=chr( (subcode1 >> 16)& 0xff)
if c1 > '\x80' and c2 > '\x80':
c1 = str2asc(c1)
c2 = str2asc(c2)
if c1>' ' and c2>' ': # ff = field name if both bytes > ' '
ff='"'+c1+c2+'"'
elif c3>' ' and c4>' ':
ff='"'+c3+c4+'"'
else:
ff=''
if subcode2==0 and subcode1==0:
ss=''
else:
ss=' sub=%d,%d X%04X,%04X %s' % (subcode1,subcode2,subcode1,subcode2,ff)
if erri:
ss+=' errinf=%08X %r' % (erri,erri)
if rsp in rspdict:
subx='' # subcode text
rspx = rspdict[rsp]
if isinstance(rspx, tuple):  # tuple type?
subdict = rspx[1] # subcode dictionary
rspx=rspx[0] # response code text
sx2 = subcode2 & 0xffff
sx1 = subcode1 & 0xffff
subx = ''
if sx2 and sx2 in subdict:
subx += ' - \n\tSubcode %d: %s' % (sx2,subdict[sx2])
elif sx1 and sx1 in subdict:
subx = ' - \n\tSubcode %d: %s' % (sx1,subdict[sx1])
elif rsp==132: # if LOB resp & subcode not listed
subx = ' - \n\t'+rspdict.get(subcode2,'No details for subcode')
return 'Adabas Response %d%s: %s%s' %\
(rsp, ss, rspx, subx)
else:
return 'Adabas Response %s: no explanation available' % rsp | 3cc817e812ea7bba346338e09965e025639631eb | 21,334 |
import pandas as pd
import xarray as xr
def to_dataframe(data: xr.DataArray, *args, **kwargs) -> pd.DataFrame:
"""
Replacement for `xr.DataArray.to_dataframe` that adds the attrs for the given
DataArray into the resultant DataFrame.
Parameters
----------
data : xr.DataArray
the data to convert to DataFrame
Returns
-------
pd.DataFrame
a pandas DataFrame containing the data in the given DataArray, including the
global attributes
"""
df = data.to_dataframe(*args, **kwargs)
for k, v in data.attrs.items():
df[k] = v
return df | 69179fc48ce9ca04e8ee99967ce44b15946f9a57 | 21,335 |
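# Hedged usage sketch for to_dataframe above (assumes xarray, pandas, numpy installed):
import numpy as np
import xarray as xr
da = xr.DataArray(np.arange(3), dims="x", name="speed", attrs={"units": "m/s"})
df = to_dataframe(da)
# df now has a "speed" column plus a "units" column filled with "m/s"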
def check_position(position):
"""Determines if the transform is valid. That is, not off-keypad."""
if position == (0, -3) or position == (4, -3):
return False
if (-1 < position[0] < 5) and (-4 < position[1] < 1):
return True
else:
return False | f95ab22ce8da386284040626ac90c908a17b53fa | 21,336 |
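# Hedged usage sketch for check_position above (hypothetical keypad coordinates):
assert check_position((0, 0)) is True    # on the keypad
assert check_position((0, -3)) is False  # explicitly excluded corner
assert check_position((5, 0)) is False   # outside the 5x4 grid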
def mobilenet_wd4_cub(num_classes=200, **kwargs):
"""
0.25 MobileNet-224 model for CUB-200-2011 from 'MobileNets: Efficient Convolutional Neural Networks for Mobile
Vision Applications,' https://arxiv.org/abs/1704.04861.
Parameters:
----------
num_classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_mobilenet(num_classes=num_classes, width_scale=0.25, model_name="mobilenet_wd4_cub", **kwargs) | f9e367058261da89a3714b543270628ab3941e12 | 21,337 |
import torch
def base_plus_copy_indices(words, dynamic_vocabs, base_vocab, volatile=False):
"""Compute base + copy indices.
Args:
words (list[list[unicode]])
dynamic_vocabs (list[HardCopyDynamicVocab])
base_vocab (HardCopyVocab)
volatile (bool)
Returns:
MultiVocabIndices
"""
unk = base_vocab.UNK
copy_seqs = []
    for seq, dyna_vocab in zip(words, dynamic_vocabs):
word_to_copy = dyna_vocab.word_to_copy_token
normal_copy_seq = []
for w in seq:
normal_copy_seq.append(word_to_copy.get(w, unk))
copy_seqs.append(normal_copy_seq)
# each SeqBatch.values has shape (batch_size, seq_length)
base_indices = SequenceBatch.from_sequences(words, base_vocab, volatile=volatile)
copy_indices = SequenceBatch.from_sequences(copy_seqs, base_vocab, volatile=volatile)
assert_tensor_equal(base_indices.mask, copy_indices.mask)
# has shape (batch_size, seq_length, 2)
concat_values = torch.stack([base_indices.values, copy_indices.values], 2)
return MultiVocabIndices(concat_values, base_indices.mask) | e6e9d42186c05d33a04c58c506e0e9b97eadac6a | 21,338 |
def font_encoding(psname):
"""Return encoding name given a psname"""
return LIBRARY.encoding(psname) | fd5d2b000624a4d04980c88cc78cd97bf49bca94 | 21,339 |
def shader_with_tex_offset(offset):
"""Returns a vertex FileShader using a texture access with the given offset."""
return FileShader(shader_source_with_tex_offset(offset), ".vert") | 0df316dd97889b3b2541d6d21970768e1cb70fe6 | 21,340 |
import numpy as np
def braycurtis(u, v):
    """
    d = braycurtis(u, v)

    Computes the Bray-Curtis distance between two n-vectors u and v:
    sum(|u_i - v_i|) / sum(|u_i + v_i|).
    """
u = np.asarray(u)
v = np.asarray(v)
return abs(u-v).sum() / abs(u+v).sum() | 693b7f0108f9f99e0950d81c2be1e9dc0bd25d86 | 21,341 |
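# Hedged usage sketch for braycurtis above:
print(braycurtis([1, 0, 0], [0, 1, 0]))  # 1.0 (maximally dissimilar)
print(braycurtis([1, 2, 3], [1, 2, 3]))  # 0.0 (identical vectors)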
def _load_pyfunc(path):
"""
Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.
:param path: Local filesystem path to the MLflow Model with the ``fastai`` flavor.
"""
return _FastaiModelWrapper(_load_model(path)) | f8349d3580c5ca407a47b24f901c8f9a7f532c77 | 21,342 |
import numpy as np
from scipy.optimize import curve_fit
# Note: relies on a module-level pseudo_voigt(x, x0, sigma, amplitude, constant, alpha) profile function.
def fit_pseudo_voigt(x, y, p0=None, fit_alpha=True, alpha_guess=0.5):
"""Fits the data with a pseudo-voigt peak.
Parameters
-----------
x: np.ndarray
Array with x values
y: np.ndarray
Array with y values
p0: list (Optional)
It contains a initial guess the for the pseudo-voigt variables, in the order:
p0 = [x0,sigma,amplitude,constant,alpha]. If None, the code will create a guess.
fit_alpha: boolean (Optional)
Option to fit the alpha parameter.
alpha_guess: float (Optional)
If alpha is being fitted, then this will be the initial guess. Otherwise it will be the fixed parameter used.
        For Lorentzian: alpha = 1; for Gaussian: alpha = 0.
Returns
-----------
popt: np.ndarray
Array with the optimized pseudo-voigt parameters.
"""
if p0 is None:
width = (x.max()-x.min())/10.
index = y == y.max()
p0 = [x[index][0],width,y.max()*width*np.sqrt(np.pi/np.log(2)),y[0],alpha_guess]
if fit_alpha is False:
popt,pcov = curve_fit(lambda x,x0,sigma,amplitude,constant: pseudo_voigt(x,x0,sigma,amplitude,constant,alpha_guess),
x,y,p0=p0[:-1])
popt = np.append(popt,alpha_guess)
else:
popt,pcov = curve_fit(pseudo_voigt,x,y,p0=p0)
return popt | 8abd61b44665632cc4e2ae21f52116757e00d2b9 | 21,343 |
from datetime import datetime
def get_name_of_day(str_date):
"""
    Return the name of the day of the week for an ISO-format date string.
"""
day = datetime.fromisoformat(str_date).weekday()
return DAYS_NAME.get(day) | ae12d7b8ec44c2fb6edcf252ed3463a385353f30 | 21,345 |
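# Hedged usage sketch for get_name_of_day above. DAYS_NAME is a module-level
# constant not shown in the snippet, so a hypothetical mapping is supplied here.
DAYS_NAME = {0: "Monday", 1: "Tuesday", 2: "Wednesday", 3: "Thursday",
             4: "Friday", 5: "Saturday", 6: "Sunday"}
print(get_name_of_day("2024-01-01"))  # 2024-01-01 was a Monday -> "Monday"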
import numpy as np
def k_fold_split(ratings, min_num_ratings=10, k=4):
"""
Creates the k (training set, test_set) used for k_fold cross validation
:param ratings: initial sparse matrix of shape (num_items, num_users)
:param min_num_ratings: all users and items must have at least min_num_ratings per user and per item to be kept
:param k: number of fold
:return: a list fold of length k such that
- fold[l][0] is a list of tuples (i,j) of the entries of 'ratings' that are the l-th testing set
- fold[l][1] is a list of tuples (i,j) of the entries of 'ratings' that are the l-th training set
"""
num_items_per_user = np.array((ratings != 0).sum(axis=0)).flatten()
num_users_per_item = np.array((ratings != 0).sum(axis=1).T).flatten()
# set seed
np.random.seed(988)
# select user and item based on the condition.
valid_users = np.where(num_items_per_user >= min_num_ratings)[0]
valid_items = np.where(num_users_per_item >= min_num_ratings)[0]
valid_ratings = ratings[valid_items, :][:, valid_users]
nnz_row, nnz_col = valid_ratings.nonzero()
nnz = list(zip(nnz_row, nnz_col))
nnz = np.random.permutation(nnz)
len_splits = int(len(nnz) / k)
splits = []
for i in range(k):
splits.append(nnz[i * len_splits: (i + 1) * len_splits])
splits = [f.tolist() for f in splits]
folds = []
for i in range(k):
tmp = []
for j in range(k):
if j != i:
tmp = tmp + splits[j]
folds.append([splits[i], tmp])
return folds | 8b151e291e3365d7986cdc7b876ef630efcb60b4 | 21,346 |
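# Hedged usage sketch for k_fold_split above (hypothetical dense ratings; assumes numpy and scipy):
import numpy as np
import scipy.sparse as sp
dense = np.random.randint(1, 6, size=(30, 30))   # every user/item has 30 ratings
ratings = sp.csr_matrix(dense)
folds = k_fold_split(ratings, min_num_ratings=10, k=4)
test_entries, train_entries = folds[0]  # index pairs for the first fold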
def merge_dict(a, b, path: list = None):
    """Recursively merge dict ``b`` into dict ``a``.

    Args:
        a: destination dictionary, modified in place
        b: dictionary whose entries are merged into ``a``
        path(list, optional): key path used while recursing (Default value = None)

    Returns:
        the merged dictionary ``a``
    """
    if path is None:
        path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge_dict(a[key], b[key], path + [str(key)])
else:
a[key] = b[key]
else:
a[key] = b[key]
return a | cd260c005b07c9c84b14a14cae3d4dc54fe26b8c | 21,347 |
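# Hedged usage sketch for merge_dict above:
a = {"x": 1, "nested": {"y": 2}}
b = {"nested": {"z": 3}, "w": 4}
merge_dict(a, b)
print(a)  # {'x': 1, 'nested': {'y': 2, 'z': 3}, 'w': 4}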
import json
def validateSignedOfferData(adat, ser, sig, tdat, method="igo"):
"""
Returns deserialized version of serialization ser which Offer
if offer request is correctly formed.
Otherwise returns None
adat is thing's holder/owner agent resource
ser is json encoded unicode string of request
sig is base64 encoded signature from request header "signer" tag
tdat is thing data resource
offer request fields
{
"uid": offeruniqueid,
"thing": thingDID,
"aspirant": AgentDID,
"duration": timeinsecondsofferisopen,
}
"""
try:
try: # get signing key of request from thing resource
(adid, index, akey) = extractDatSignerParts(tdat)
except ValueError as ex:
raise ValidationError("Missing or invalid signer")
# get agent key at index from signer data. assumes that resource is valid
try:
averkey = adat["keys"][index]["key"]
except (TypeError, KeyError, IndexError) as ex:
raise ValidationError("Missing or invalid signer key")
if len(averkey) != 44:
raise ValidationError("Invalid signer key") # invalid length for base64 encoded key
# verify request using agent signer verify key
if not verify64u(sig, ser, averkey):
raise ValidationError("Unverifiable signatrue") # signature fails
# now validate offer data
try:
dat = json.loads(ser, object_pairs_hook=ODict)
except ValueError as ex:
raise ValidationError("Invalid json") # invalid json
if not dat: # offer request must not be empty
raise ValidationError("Empty body")
if not isinstance(dat, dict): # must be dict subclass
raise ValidationError("JSON not dict")
requireds = ("uid", "thing", "aspirant", "duration")
for field in requireds:
if field not in dat:
raise ValidationError("Missing missing required field {}".format(field))
if not dat["uid"]: # uid must not be empty
raise ValidationError("Empty uid")
if dat["thing"] != tdat['did']:
raise ValidationError("Not same thing")
aspirant = dat["aspirant"]
try: # correct did format pre:method:keystr
pre, meth, keystr = aspirant.split(":")
except ValueError as ex:
raise ValidationError("Invalid aspirant")
if pre != "did" or meth != method:
raise ValidationError("Invalid aspirant") # did format bad
try:
duration = float(dat["duration"])
except ValueError as ex:
raise ValidationError("Invalid duration")
if duration < PROPAGATION_DELAY * 2.0:
raise ValidationError("Duration too short")
except ValidationError:
raise
except Exception as ex: # unknown problem
raise ValidationError("Unexpected error")
return dat | 4aebe14b8a90dc1c3e47763ce49e142d77f99bd9 | 21,348 |
def get_relevant_phrases(obj=None):
""" Get all phrases to be searched for. This includes all SensitivePhrases, and any RelatedSensitivePhrases that
refer to the given object.
:param obj: A model instance to check for sensitive phrases made specifically for that instance.
    :return: a list of dicts, each holding a phrase, its replacement, and the word-boundary flags.
"""
replacements = []
content_type = ContentType.objects.get_for_model(obj)
related_sensitive_phrases = RelatedSensitivePhrase.objects.filter(
content_type__pk=content_type.id,
object_id=obj.id
).extra(select={'length': 'Length(phrase)'}).order_by('-length', 'phrase')
for phrase in related_sensitive_phrases:
replacements.append({
'phrase': phrase.phrase,
'replacement': phrase.replace_phrase,
'start_boundary': phrase.check_for_word_boundary_start,
'end_boundary': phrase.check_for_word_boundary_end
})
sensitive_phrases = SensitivePhrase.objects.all() \
.extra(select={'length': 'Length(phrase)'}).order_by('-length', 'phrase')
for phrase in sensitive_phrases:
replacements.append({
'phrase': phrase.phrase,
'replacement': phrase.replace_phrase,
'start_boundary': phrase.check_for_word_boundary_start,
'end_boundary': phrase.check_for_word_boundary_end
})
return replacements | 951166c89dc8e257bce512d13cee592e1266efae | 21,349 |
# Bazel Starlark (.bzl) rule helper; `struct` below is the Starlark built-in, not a Python module.
def _prepare_cabal_inputs(
hs,
cc,
posix,
dep_info,
cc_info,
direct_cc_info,
component,
package_id,
tool_inputs,
tool_input_manifests,
cabal,
setup,
setup_deps,
setup_dep_info,
srcs,
compiler_flags,
flags,
generate_haddock,
cabal_wrapper,
package_database,
verbose,
transitive_haddocks,
dynamic_binary = None):
"""Compute Cabal wrapper, arguments, inputs."""
with_profiling = is_profiling_enabled(hs)
# Haskell library dependencies or indirect C library dependencies are
# already covered by their corresponding package-db entries. We only need
# to add libraries and headers for direct C library dependencies to the
# command line.
direct_libs = get_ghci_library_files(hs, cc.cc_libraries_info, cc.cc_libraries)
# The regular Haskell rules perform mostly static linking, i.e. where
# possible all C library dependencies are linked statically. Cabal has no
# such mode, and since we have to provide dynamic C libraries for
# compilation, they will also be used for linking. Hence, we need to add
# RUNPATH flags for all dynamic C library dependencies. Cabal also produces
# a dynamic and a static Haskell library in one go. The dynamic library
# will link other Haskell libraries dynamically. For those we need to also
# provide RUNPATH flags for dynamic Haskell libraries.
(_, dynamic_libs) = get_library_files(
hs,
cc.cc_libraries_info,
cc.transitive_libraries,
dynamic = True,
)
# Executables build by Cabal will link Haskell libraries statically, so we
# only need to include dynamic C libraries in the runfiles tree.
(_, runfiles_libs) = get_library_files(
hs,
cc.cc_libraries_info,
get_cc_libraries(cc.cc_libraries_info, cc.transitive_libraries),
dynamic = True,
)
# Setup dependencies are loaded by runghc.
setup_libs = get_ghci_library_files(hs, cc.cc_libraries_info, cc.setup_libraries)
# The regular Haskell rules have separate actions for linking and
# compilation to which we pass different sets of libraries as inputs. The
# Cabal rules, in contrast, only have a single action for compilation and
# linking, so we must provide both sets of libraries as inputs to the same
# action.
transitive_compile_libs = get_ghci_library_files(hs, cc.cc_libraries_info, cc.transitive_libraries)
transitive_link_libs = _concat(get_library_files(hs, cc.cc_libraries_info, cc.transitive_libraries))
env = dict(hs.env)
env["PATH"] = join_path_list(hs, _binary_paths(tool_inputs) + posix.paths)
if hs.toolchain.is_darwin:
env["SDKROOT"] = "macosx" # See haskell/private/actions/link.bzl
if verbose:
env["CABAL_VERBOSE"] = "True"
args = hs.actions.args()
package_databases = dep_info.package_databases
transitive_headers = cc_info.compilation_context.headers
direct_include_dirs = depset(transitive = [
direct_cc_info.compilation_context.includes,
direct_cc_info.compilation_context.quote_includes,
direct_cc_info.compilation_context.system_includes,
])
direct_lib_dirs = [file.dirname for file in direct_libs]
args.add_all([component, package_id, generate_haddock, setup, cabal.dirname, package_database.dirname])
args.add_joined([
arg
for package_id in setup_deps
for arg in ["-package-id", package_id]
] + [
arg
for package_db in setup_dep_info.package_databases.to_list()
for arg in ["-package-db", "./" + _dirname(package_db)]
], join_with = " ", format_each = "--ghc-arg=%s", omit_if_empty = False)
args.add("--flags=" + " ".join(flags))
args.add_all(compiler_flags, format_each = "--ghc-option=%s")
if dynamic_binary:
args.add_all(
[
"--ghc-option=-optl-Wl,-rpath," + create_rpath_entry(
binary = dynamic_binary,
dependency = lib,
keep_filename = False,
prefix = relative_rpath_prefix(hs.toolchain.is_darwin),
)
for lib in dynamic_libs
],
uniquify = True,
)
args.add("--")
args.add_all(package_databases, map_each = _dirname, format_each = "--package-db=%s")
args.add_all(direct_include_dirs, format_each = "--extra-include-dirs=%s")
args.add_all(direct_lib_dirs, format_each = "--extra-lib-dirs=%s", uniquify = True)
if with_profiling:
args.add("--enable-profiling")
# Redundant with _binary_paths() above, but better be explicit when we can.
args.add_all(tool_inputs, map_each = _cabal_tool_flag)
inputs = depset(
[setup, hs.tools.ghc, hs.tools.ghc_pkg, hs.tools.runghc],
transitive = [
depset(srcs),
depset(cc.files),
package_databases,
setup_dep_info.package_databases,
transitive_headers,
depset(setup_libs),
depset(transitive_compile_libs),
depset(transitive_link_libs),
depset(transitive_haddocks),
setup_dep_info.interface_dirs,
setup_dep_info.hs_libraries,
dep_info.interface_dirs,
dep_info.hs_libraries,
tool_inputs,
],
)
input_manifests = tool_input_manifests + hs.toolchain.cc_wrapper.manifests
return struct(
cabal_wrapper = cabal_wrapper,
args = args,
inputs = inputs,
input_manifests = input_manifests,
env = env,
runfiles = depset(direct = runfiles_libs),
) | 4d42e6b772a64bc721e30907417dd8c734ce79e6 | 21,350 |
def split_kp(kp_joined, detach=False):
"""
Split the given keypoints into two sets(one for driving video frames, and the other for source image)
"""
if detach:
kp_video = {k: v[:, 1:].detach() for k, v in kp_joined.items()}
kp_appearance = {k: v[:, :1].detach() for k, v in kp_joined.items()}
else:
kp_video = {k: v[:, 1:] for k, v in kp_joined.items()}
kp_appearance = {k: v[:, :1] for k, v in kp_joined.items()}
return {'kp_driving': kp_video, 'kp_source': kp_appearance} | 0396003a17172a75b121ddb43c9b9cf14ee3e458 | 21,351 |
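# Hedged usage sketch for split_kp above: keypoints for 1 source frame plus
# 3 driving frames, shaped (batch, frames, num_kp, 2).
import torch
kp_joined = {"mean": torch.zeros(2, 4, 10, 2)}
kp = split_kp(kp_joined)
print(kp["kp_source"]["mean"].shape)   # torch.Size([2, 1, 10, 2])
print(kp["kp_driving"]["mean"].shape)  # torch.Size([2, 3, 10, 2])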
def low_shelve(signal, frequency, gain, order, shelve_type='I',
sampling_rate=None):
"""
Create and apply first or second order low shelve filter.
Uses the implementation of [#]_.
Parameters
----------
signal : Signal, None
The Signal to be filtered. Pass None to create the filter without
applying it.
frequency : number
Characteristic frequency of the shelve in Hz
gain : number
Gain of the shelve in dB
order : number
The shelve order. Must be ``1`` or ``2``.
shelve_type : str
Defines the characteristic frequency. The default is ``'I'``
``'I'``
defines the characteristic frequency 3 dB below the gain value if
the gain is positive and 3 dB above the gain value otherwise
``'II'``
defines the characteristic frequency at 3 dB if the gain is
positive and at -3 dB if the gain is negative.
``'III'``
defines the characteristic frequency at gain/2 dB
sampling_rate : None, number
The sampling rate in Hz. Only required if signal is ``None``. The
default is ``None``.
Returns
-------
signal : Signal
The filtered signal. Only returned if ``sampling_rate = None``.
filter : FilterIIR
Filter object. Only returned if ``signal = None``.
References
----------
.. [#] https://github.com/spatialaudio/digital-signal-processing-lecture/\
blob/master/filter_design/audiofilter.py
"""
output = _shelve(
signal, frequency, gain, order, shelve_type, sampling_rate, 'low')
return output | 130fd593988d1fd0b85795389dab554d59fedb97 | 21,352 |
from typing import Tuple
import cv2
import numpy as np
def get_breast_zone(mask: np.ndarray, convex_contour: bool = False) -> Tuple[np.ndarray, tuple]:
    """
    Extract the breast region of an image as the largest area contained in a mask.
    :param mask: mask on which the contour search and largest-area selection are performed.
    :param convex_contour: boolean flag to use convex-hull contours.
    :return: mask containing the largest-area contour, together with the x and y vertex and the width and height
             of the rectangle that encloses the largest-area region of the mask.
    """
    # Get the contours of the white regions of the image.
    contours = get_contours(img=mask)
    # Keep the contour that encloses the largest area.
    largest_contour = sorted(contours, key=cv2.contourArea, reverse=True)[0]
    # Replace the contour with its convex hull if requested.
    if convex_contour:
        largest_contour = cv2.convexHull(largest_contour)
    # Build the mask from the selected contour.
    breast_zone = cv2.drawContours(
        image=np.zeros(mask.shape, np.uint8), contours=[largest_contour], contourIdx=-1, color=(255, 255, 255),
        thickness=-1
    )
    # Get the bounding rectangle that contains the breast.
    x, y, w, h = cv2.boundingRect(largest_contour)
return breast_zone, (x, y, w, h) | 429344c0645fa7bcfa49abcaf9b022f61c48bc35 | 21,353 |
def replace(temporaryans, enterword, answer):
"""
:param temporaryans: str, temporary answer.
:param enterword: str, the character that user guesses.
:param answer: str, the answer for this hangman game.
:return: str, the temporary answer after hyphens replacement.
"""
# s = replace('-----', 'A', answer)
while True:
i = answer.find(enterword)
if i >= 0:
y = temporaryans[:i]
# ---
y += enterword
# ---A
y += temporaryans[i+1:]
# ---A-
temporaryans = y
answer = answer[:i] + '-' + answer[i+1:]
else:
            ans = temporaryans  # temporaryans already holds every replacement made so far
break
return ans | 80d8625dca573744e9945190ee169438754b1829 | 21,354 |
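# Hedged usage sketch for the hangman helper above:
print(replace('-------', 'A', 'ALABAMA'))  # 'A-A-A-A'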
def extract_timestamp(line):
    """Extract the timestamp field from a tab-separated log line, in a form
    that compares as expected.
    """
    # return the timestamp (7th tab-separated field) as a string
return line.split('\t')[6] | 84618f02e4116c70d9f6a1518aafb0691a29ef07 | 21,355 |
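# Hedged usage sketch for extract_timestamp above (hypothetical tab-separated log line):
line = "host\tuser\tsession\taction\tstatus\tbytes\t1609459200\textra"
print(extract_timestamp(line))  # '1609459200'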
def svn_stream_from_stringbuf(*args):
"""svn_stream_from_stringbuf(svn_stringbuf_t str, apr_pool_t pool) -> svn_stream_t"""
return _core.svn_stream_from_stringbuf(*args) | 9710061adb6d80527a3f3afa84bf41e0fa6406c6 | 21,356 |
def get_autoencoder_model(hidden_units, target_predictor_fn,
activation, add_noise=None, dropout=None):
"""Returns a function that creates a Autoencoder TensorFlow subgraph.
Args:
hidden_units: List of values of hidden units for layers.
target_predictor_fn: Function that will predict target from input
features. This can be logistic regression,
linear regression or any other model,
that takes x, y and returns predictions and loss
tensors.
activation: activation function used to map inner latent layer onto
reconstruction layer.
add_noise: a function that adds noise to tensor_in,
e.g. def add_noise(x):
return(x + np.random.normal(0, 0.1, (len(x), len(x[0]))))
dropout: When not none, causes dropout regularization to be used,
with the specified probability of removing a given coordinate.
Returns:
A function that creates the subgraph.
"""
def dnn_autoencoder_estimator(x):
"""Autoencoder estimator with target predictor function on top."""
encoder, decoder = autoencoder_ops.dnn_autoencoder(
x, hidden_units, activation,
add_noise=add_noise, dropout=dropout)
return encoder, decoder, target_predictor_fn(x, decoder)
return dnn_autoencoder_estimator | 88c58b2c43c26aa8e71baf684688d27db251cdb6 | 21,357 |