content | sha1 | id
---|---|---
string (lengths 35 to 762k) | string (length 40) | int64 (0 to 3.66M)
from flask import request, jsonify  # assumed Flask context; imports not shown in the original
def create_contact():
    """
    Create a contact from a JSON form submission.
    """
    data = request.get_json(force=True)
    contact = ContactDAO.create(**data)  # ContactDAO: data-access helper from the host app
    return jsonify(contact.to_dict()) | af2c5efbd06d3220faf3b16059ea9d612cece19e | 10,613 |
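A hedged wiring sketch (not part of the original snippet): how this handler could be registered in a Flask app, with a hypothetical route path.

from flask import Flask

app = Flask(__name__)
app.add_url_rule("/contacts", view_func=create_contact, methods=["POST"])
# POSTing JSON such as {"name": "Ada"} to /contacts would create a contact.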
from typing import DefaultDict
from typing import Tuple
from typing import List
import copy
def separate_sets(
hand: DefaultDict[int, int], huro_count: int, koutsu_first: bool = True
) -> Tuple[List[Tile], List[List[Tile]], Tile]:
"""Helper function for seperating player's remaining hands into sets.
It should either be 14, 11, 8, 5, or 2 tiles.
The arg koutsu_first would change the priority for koutsu and shuntsu,
for example in the usecase for checking 全帯么九, shuntsu should have
priority over koutsu.
"""
def check_koutsu(sets_to_find):
if remain_tiles[tile_index] >= 3: # check for Koutsu
remain_tiles[tile_index] -= 3
sets_to_find -= 1
koutsu.append(Tile.from_index(tile_index))
return sets_to_find
def check_shuntsu(sets_to_find):
if remain_tiles[tile_index + 2] > 0: # check for Shuntsu
chii_n = min(
remain_tiles[tile_index],
remain_tiles[tile_index + 1],
remain_tiles[tile_index + 2]
)
if chii_n > 0:
remain_tiles[tile_index] -= chii_n
remain_tiles[tile_index + 1] -= chii_n
remain_tiles[tile_index + 2] -= chii_n
sets_to_find -= chii_n
for _ in range(chii_n):
shuntsu.append([
Tile.from_index(tile_index),
Tile.from_index(tile_index + 1),
Tile.from_index(tile_index + 2)
])
return sets_to_find
for possible_jantou in hand.keys():
if hand[possible_jantou] >= 2: # try using it as jantou
remain_tiles = copy.deepcopy(hand)
remain_tiles[possible_jantou] -= 2
koutsu = []
shuntsu = []
sets_to_find = 4 - huro_count
for tile_index in sorted(remain_tiles.keys()):
                if tile_index < Tile(Suit.MANZU.value, 1).index:  # honor tiles: only koutsu possible
                    if remain_tiles[tile_index] == 3:
sets_to_find -= 1
koutsu.append(Tile.from_index(tile_index))
else: # numbered tiles
if koutsu_first:
sets_to_find = check_koutsu(sets_to_find)
sets_to_find = check_shuntsu(sets_to_find)
else:
sets_to_find = check_shuntsu(sets_to_find)
sets_to_find = check_koutsu(sets_to_find)
if sets_to_find == 0:
return koutsu, shuntsu, Tile.from_index(possible_jantou)
return [], [], None | 894a712a739e16a98e2150c4461a3d66c759bace | 10,615 |
def units(legal_codes):
"""
Return sorted list of the unique units for the given
dictionaries representing legal_codes
"""
return sorted(set(lc["unit"] for lc in legal_codes)) | 85803ecb3d1f51c058c959b7e060c3cb5263f6a3 | 10,616 |
from collections import defaultdict
def resize_terms(terms1, terms2, patterns_to_pgS, use_inv):
"""
Resize the terms to ensure that the probabilities are the same on both sides.
This is necessary to maintain the null hypothesis that D = 0 under no introgression.
    Inputs:
    terms1 --- a set of patterns to count and add together to determine introgression
    terms2 --- a second set of patterns to count and add together to determine introgression
    patterns_to_pgS --- a mapping of site patterns to their p(gt|st) values
    use_inv --- boolean determining whether inverse site patterns are used
Outputs:
terms1 --- a set of patterns to count and add to each other to determine introgression
terms2 --- a set of other patterns to count and add to each other to determine introgression
"""
terms1 = list(terms1)
terms2 = list(terms2)
# Create a mapping of pgtst to trees for each term
pgtst_to_trees1 = defaultdict(set)
pgtst_to_trees2 = defaultdict(set)
for tree in terms1:
        # Round the probability to 15 decimal places to avoid floating-point noise with small values
prob = float(format(patterns_to_pgS[tree], '.15f'))
pgtst_to_trees1[prob].add(tree)
for tree in terms2:
        # Round the probability to 15 decimal places to avoid floating-point noise with small values
prob = float(format(patterns_to_pgS[tree], '.15f'))
pgtst_to_trees2[prob].add(tree)
# Balance terms
terms1_prob_counts = defaultdict(int)
terms2_prob_counts = defaultdict(int)
# Round each probability and count the number of times it occurs
for tree in terms1:
prob = float(format(patterns_to_pgS[tree], '.15f'))
terms1_prob_counts[prob] += 1
for tree in terms2:
prob = float(format(patterns_to_pgS[tree], '.15f'))
terms2_prob_counts[prob] += 1
# Iterate over each probability
for prob in terms1_prob_counts:
# Get the number of times each probability occurs
count1, count2 = terms1_prob_counts[prob], terms2_prob_counts[prob]
removed = set([])
# The number of site patterns to remove is the difference in counts
num_remove = abs(count2 - count1)
        if use_inv:
            # When using inverses, each removal drops a pattern together with its
            # inverse, so only half as many removals are needed (integer division
            # keeps `range` happy)
            num_remove = num_remove // 2
# If probabilities do not occur an equal number of times remove site patterns until they do
if count1 > count2:
for i in range(num_remove):
# Get a pattern to remove and remove it from the possible removals
r = sorted(list(pgtst_to_trees1[prob])).pop(0)
pgtst_to_trees1[prob].remove(r)
removed.add(r)
terms1_remove = True
if count1 < count2:
for i in range(num_remove):
# Get a pattern to remove and remove it from the possible removals
r = sorted(list(pgtst_to_trees2[prob])).pop(0)
pgtst_to_trees2[prob].remove(r)
removed.add(r)
terms1_remove = False
if use_inv:
# Remove site patterns and their inverses
rm = set([])
inv_rm = pattern_inverter(removed)
for pattern in inv_rm:
rm.add(''.join(pattern))
removed = removed.union(rm)
# Iterate over each pattern to be removed and remove it
for pattern in removed:
if terms1_remove:
terms1.remove(pattern)
else:
terms2.remove(pattern)
terms1, terms2 = tuple(terms1), tuple(terms2)
return terms1, terms2 | d422e3d5b32df55036afa3788cdb0bdd4aa95001 | 10,617 |
def get_network_list():
"""Get a list of networks.
---
tags:
- network
"""
return jsonify([
network.to_json(include_id=True)
for network in manager.cu_list_networks()
]) | 6a54b76091160fc28cd45502aea4c54d2862a588 | 10,618 |
import numpy as np
def bfixpix(data, badmask, n=4, retdat=False):
"""Replace pixels flagged as nonzero in a bad-pixel mask with the
average of their nearest four good neighboring pixels.
:INPUTS:
data : numpy array (two-dimensional)
badmask : numpy array (same shape as data)
:OPTIONAL_INPUTS:
n : int
number of nearby, good pixels to average over
retdat : bool
If True, return an array instead of replacing-in-place and do
_not_ modify input array `data`. This is always True if a 1D
array is input!
:RETURNS:
another numpy array (if retdat is True)
:TO_DO:
Implement new approach of Popowicz+2013 (http://arxiv.org/abs/1309.4224)
"""
# 2010-09-02 11:40 IJC: Created
#2012-04-05 14:12 IJMC: Added retdat option
# 2012-04-06 18:51 IJMC: Added a kludgey way to work for 1D inputs
# 2012-08-09 11:39 IJMC: Now the 'n' option actually works.
if data.ndim==1:
data = np.tile(data, (3,1))
badmask = np.tile(badmask, (3,1))
ret = bfixpix(data, badmask, n=2, retdat=True)
return ret[1]
nx, ny = data.shape
badx, bady = np.nonzero(badmask)
nbad = len(badx)
if retdat:
data = np.array(data, copy=True)
for ii in range(nbad):
thisloc = badx[ii], bady[ii]
rad = 0
numNearbyGoodPixels = 0
while numNearbyGoodPixels<n:
rad += 1
xmin = max(0, badx[ii]-rad)
xmax = min(nx, badx[ii]+rad)
ymin = max(0, bady[ii]-rad)
ymax = min(ny, bady[ii]+rad)
x = np.arange(nx)[xmin:xmax+1]
y = np.arange(ny)[ymin:ymax+1]
yy,xx = np.meshgrid(y,x)
#print ii, rad, xmin, xmax, ymin, ymax, badmask.shape
            # distance of each window pixel from the bad pixel; bad pixels are zeroed out
            rr = abs((xx - badx[ii]) + 1j*(yy - bady[ii])) \
                * (1. - badmask[xmin:xmax+1,ymin:ymax+1])
numNearbyGoodPixels = (rr>0).sum()
closestDistances = np.unique(np.sort(rr[rr>0])[0:n])
numDistances = len(closestDistances)
localSum = 0.
localDenominator = 0.
for jj in range(numDistances):
localSum += data[xmin:xmax+1,ymin:ymax+1][rr==closestDistances[jj]].sum()
localDenominator += (rr==closestDistances[jj]).sum()
#print badx[ii], bady[ii], 1.0 * localSum / localDenominator, data[xmin:xmax+1,ymin:ymax+1]
data[badx[ii], bady[ii]] = 1.0 * localSum / localDenominator
if retdat:
ret = data
else:
ret = None
return ret | ae6b6c44e82dc70f998b31d9645cf74fef92c9fd | 10,619 |
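A minimal usage sketch for bfixpix, assuming the function above is importable; the flagged pixel is replaced by the average of its nearest good neighbors.

img = np.arange(25, dtype=float).reshape(5, 5)
mask = np.zeros_like(img)
mask[2, 2] = 1  # flag the center pixel as bad

fixed = bfixpix(img, mask, n=4, retdat=True)  # retdat=True leaves `img` untouched
print(fixed[2, 2])  # mean of the 4 nearest good pixels around (2, 2)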
import re
def parse_discount(element):
"""Given an HTML element, parse and return the discount."""
try:
# Remove any non integer characters from the HTML element
        discount = re.sub(r"\D", "", element)
except AttributeError:
discount = "0"
return discount | 658f8a6bef8ba4bf82646a10c495904c03a717c7 | 10,620 |
import bisect
from collections import defaultdict
def read_files(allVCFs):
"""
Load all vcfs and count their number of entries
"""
# call exists in which files
call_lookup = defaultdict(list)
# total number of calls in a file
file_abscnt = defaultdict(float)
for vcfn in allVCFs:
v = parse_vcf(vcfn)
# disallow intra vcf duplicates
seen = {}
for entry in v:
key = entry_key(entry)
if key in seen:
continue
seen[key] = True
bisect.insort(call_lookup[key], vcfn)
file_abscnt[vcfn] += 1
return call_lookup, file_abscnt | 8518eac3c43772016fd5cbe0fd6c423a1e463ebc | 10,621 |
import numpy as np
def parse_dat_file(dat_file):
"""
Parse a complete dat file.
dat files are transposed wrt the rest of the data formats here. In addition, they only contain integer fields,
so we can use np.loadtxt.
First 6 columns are ignored.
Note: must have a bims and info file to process completely.
Parameters
----------
dat_file: str
Path for dat file to process.
Returns
-------
data: array-like
"""
data = np.loadtxt(dat_file)
data = data[:, 6:].T
return data | 3b84730a347075c5be1e0ebe5a195338a86ed0c6 | 10,622 |
from typing import List
def next_whole_token(
wordpiece_subtokens,
initial_tokenizer,
subword_tokenizer):
"""Greedily reconstitutes a whole token from a WordPiece list.
This function assumes that the wordpiece subtokens were constructed correctly
from a correctly subtokenized CuBERT tokenizer, but the sequence may be
truncated and thus incomplete.
The implementation is done in two stages: recognizing the first whole token
and then finding the correspondence of that first whole token to a prefix of
the subtoken sequence.
The implementation assumes that untokenization can do the best job on the full
context. So, it first untokenizes the whole sequence, and chooses the first
whole token.
To figure out the subtoken prefix that corresponds to that whole token, the
implementation greedily untokenizes longer and longer subtoken prefixes, until
the whole token is recognized in the output.
The reason for this somewhat expensive implementation is that the logic for
merging subtokens (for WordPiece and then for CuBERT) is intricate, and does
not export how many initial subtokens were consumed for each output token of
the next higher abstraction. What's more, a subtoken may align itself with
the previous or the next whole token, when the subtoken sequence is
incomplete.
Args:
wordpiece_subtokens: The subtokens to scan through.
initial_tokenizer: A CuBERT tokenizer.
subword_tokenizer: A SubwordTextEncoder.
Returns:
    The first whole token matched, and the index just past the last subtoken of
    that whole token. wordpiece_subtokens[0:end_index] are the subtokens
    corresponding to the whole token returned.
Raises:
ValueError if no whole token can be parsed.
"""
wordpiece_ids = wordpiece_ids_from_wordpiece_tokens(wordpiece_subtokens,
subword_tokenizer)
full_cubert_subtokens: List[str] = (
subword_tokenizer._subtoken_ids_to_tokens( # pylint: disable=protected-access
wordpiece_ids))
full_cubert_subtokens.append(
unified_tokenizer.quote_special(unified_tokenizer.TokenKind.EOS.name))
full_whole_tokens = initial_tokenizer.untokenize_agnostic(
full_cubert_subtokens)
if len(full_whole_tokens) < 2:
# It all came out a jumble. Reject it.
raise ValueError(f'Whole tokens {full_whole_tokens} ended up '
f'undifferentiable in {wordpiece_subtokens}.')
whole_token = full_whole_tokens[0]
for end_index in range(1, len(wordpiece_ids) + 1):
prefix_list = wordpiece_ids[:end_index]
partial_cubert_subtokens: List[str] = (
subword_tokenizer._subtoken_ids_to_tokens( # pylint: disable=protected-access
prefix_list))
# We strip EOS in `code_to_cubert_sentences`, so we have to add it back
# here.
partial_cubert_subtokens.append(
unified_tokenizer.quote_special(unified_tokenizer.TokenKind.EOS.name))
partial_whole_tokens = initial_tokenizer.untokenize_agnostic(
partial_cubert_subtokens)
if len(partial_whole_tokens) > 1:
if partial_whole_tokens[0] == whole_token:
return whole_token, end_index
# We got here because we couldn't match the whole token we found from the
# full sequence
raise ValueError('Could not find a whole token in %r' %
(wordpiece_subtokens,)) | d26f4da0932030242c2209bc998bc32b6ce98cdf | 10,625 |
import numpy as np
def match_seq_len(*arrays: np.ndarray):
    """Left-pad arrays with zeros along the last axis so they all match the
    longest sequence length.
    Args:
        *arrays: arrays of shape (batch, channels, seq_len), where seq_len may differ.
    Returns:
        A list of arrays, each padded to the maximum seq_len found.
    """
    max_len = max(x.shape[-1] for x in arrays)
    return [np.pad(x, pad_width=((0, 0), (0, 0), (max_len - x.shape[-1], 0)), mode='constant', constant_values=0) for x
            in arrays] | 2cd8715eb634e0b3604e1d5c305a5209bb0ae03d | 10,627 |
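A quick self-contained example: two hypothetical (batch, channels, time) arrays, the shorter one left-padded with zeros to match.

a = np.ones((1, 2, 3))
b = np.ones((1, 2, 5))
a_pad, b_pad = match_seq_len(a, b)
print(a_pad.shape, b_pad.shape)  # (1, 2, 5) (1, 2, 5)
print(a_pad[0, 0])               # [0. 0. 1. 1. 1.], zeros padded on the left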
import torch
import math
def get_cmws_5_loss(
generative_model, guide, memory, obs, obs_id, num_particles, num_proposals, insomnia=1.0
):
"""Normalize over particles-and-memory for generative model gradient
Args:
generative_model
guide
memory
obs: tensor of shape [batch_size, *obs_dims]
obs_id: long tensor of shape [batch_size]
num_particles (int): number of particles used to marginalize continuous latents
        num_proposals (int): number of proposed elements to be considered as new memory
        insomnia (float): interpolation weight for the guide loss; 1.0 uses only the
            CMWS (wake) loss, 0.0 only the sleep loss
    Returns: [batch_size]
"""
# Extract
batch_size = obs.shape[0]
# SAMPLE d'_{1:R} ~ q(d | x)
# [num_proposals, batch_size, ...]
proposed_discrete_latent = guide.sample_discrete(obs, (num_proposals,))
# ASSIGN d_{1:(R + M)} = CONCAT(d'_{1:R}, d_{1:M})
# [memory_size + num_proposals, batch_size, ...]
discrete_latent_concat = cmws.memory.concat(memory.select(obs_id), proposed_discrete_latent)
# COMPUTE SCORES s_i = log p(d_i, x) for i {1, ..., (R + M)}
# -- c ~ q(c | d, x)
# [num_particles, memory_size + num_proposals, batch_size, ...]
_continuous_latent = guide.sample_continuous(obs, discrete_latent_concat, [num_particles])
# -- log q(c | d)
# [num_particles, memory_size + num_proposals, batch_size]
_log_q_continuous = guide.log_prob_continuous(obs, discrete_latent_concat, _continuous_latent)
# -- log p(d, c, x)
# [num_particles, memory_size + num_proposals, batch_size]
_log_p = generative_model.log_prob_discrete_continuous(
discrete_latent_concat, _continuous_latent, obs
)
# [memory_size + num_proposals, batch_size]
log_marginal_joint = torch.logsumexp(_log_p - _log_q_continuous, dim=0) - math.log(
num_particles
)
# ASSIGN d_{1:M} = TOP_K_UNIQUE(d_{1:(R + M)}, s_{1:(R + M)})
# [memory_size, batch_size, ...], [memory_size, batch_size]
discrete_latent_selected, _, indices = cmws.memory.get_unique_and_top_k(
discrete_latent_concat, log_marginal_joint, memory.size, return_indices=True
)
# SELECT log q(c | d, x) and log p(d, c, x)
# [num_particles, memory_size, batch_size]
_log_q_continuous = torch.gather(
_log_q_continuous, 1, indices[None].expand(num_particles, memory.size, batch_size)
)
# [num_particles, memory_size, batch_size]
_log_p = torch.gather(_log_p, 1, indices[None].expand(num_particles, memory.size, batch_size))
# COMPUTE WEIGHT
# [num_particles, memory_size, batch_size]
_log_weight = _log_p - _log_q_continuous
# COMPUTE log q(d_i | x) for i in {1, ..., M}
# [memory_size, batch_size]
    _log_q_discrete = guide.log_prob_discrete(obs, discrete_latent_selected)
# UPDATE MEMORY with d_{1:M}
memory.update(obs_id, discrete_latent_selected)
# CHECK UNIQUE
# if not memory.is_unique(obs_id).all():
# raise RuntimeError("memory not unique")
# COMPUTE losses
# --Compute generative model loss
# [num_particles, memory_size, batch_size]
_log_weight_v = torch.softmax(_log_weight.view(-1, batch_size), dim=0).view(
num_particles, memory.size, batch_size
)
# [batch_size]
generative_model_loss = -(_log_weight_v.detach() * _log_p).sum(dim=[0, 1])
# --Compute guide loss
    # ----Compute guide sleep loss
if insomnia < 1.0:
# [batch_size]
guide_loss_sleep = (
get_sleep_loss(generative_model, guide, num_particles * batch_size)
.view(batch_size, num_particles)
.mean(-1)
)
# ----Compute guide CMWS loss
if insomnia > 0.0:
# [memory_size, batch_size]
_log_weight_omega = torch.logsumexp(_log_weight_v, dim=0)
# [batch_size]
discrete_guide_loss_cmws = -(_log_weight_omega.detach() * _log_q_discrete).sum(dim=0)
# [batch_size]
continuous_guide_loss_cmws = -(
(torch.softmax(_log_weight, dim=0).detach() * _log_q_continuous).sum(dim=0).mean(dim=0)
)
# [batch_size]
guide_loss_cmws = discrete_guide_loss_cmws + continuous_guide_loss_cmws
# ----Combine guide sleep and CMWS losses
if insomnia == 0.0:
guide_loss = guide_loss_sleep
elif insomnia == 1.0:
guide_loss = guide_loss_cmws
else:
guide_loss = insomnia * guide_loss_cmws + (1 - insomnia) * guide_loss_sleep
return generative_model_loss + guide_loss | 5e2b87a7d19eab1f09e5207f4c16c8e4a56b2225 | 10,628 |
import torch
def to_float_tensor(np_array):
    """
    Convert a numpy array to a float torch tensor.
    :param np_array: numpy array to convert
    :return: torch.Tensor with dtype torch.float
    """
    return torch.from_numpy(np_array).type(torch.float) | 84512d8383999bf22841c0e7e1fc8048bcba9a1a | 10,630 |
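Example, assuming numpy is available:

import numpy as np

t = to_float_tensor(np.array([[1, 2], [3, 4]]))
print(t.dtype)  # torch.float32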
import seaborn as sns
import matplotlib.pyplot as plt
def display_heatmap(salience_scores,
salience_scores_2=None,
title=None,
title_2=None,
cell_labels=None,
cell_labels_2=None,
normalized=True,
ui=False):
"""
A utility function that displays a Seaborn heatmap.
Input:
    - ('salience_scores') A list of floats.
      If the task is something like NLI, these are the salience scores for the
      premise, or first sequence.
    - ('salience_scores_2') A list of floats.
      Optional. Only necessary when the task is a relation-labeling task between
      two sequences, like NLI. These are the salience scores for the hypothesis, or second sequence.
- ('title') Any object (string, integer, float, etc.) that can be printed.
Optional.
Usually is descriptive blurb for the heatmap for ('salience_scores')
- ('title_2') Any object (string, integer, float, etc.) that can be printed.
Optional. Usually is descriptive blurb for the heatmap for ('salience scores_2')
- ('cell_labels') Optional. list of the same size as ('salience_scores') that is printed
on the corresponding cell. Usually something like salience score values.
- ('cell_labels_2') Optional. list of the same size as ('salience_scores_2') that is printed
on the corresponding cell. Usually something like salience score values.
- ('normalized') A boolean denoting whether the data is normalized or not. If normalized,
the range is from -1 to 1.
    - ('ui') A boolean to instead save the plot to a file and return the filename
      (not used in the code shown here).
Output:
- Return the matplotlib object
"""
if cell_labels is not None:
assert len(cell_labels) == len(salience_scores)
if cell_labels_2 is not None:
assert len(cell_labels_2) == len(salience_scores_2)
cmap = sns.diverging_palette(10, 240, as_cmap=True)
if salience_scores_2 is not None:
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 12))
ax1.set_title(title if title is not None else "")
ax2.set_title(title_2 if title_2 is not None else "")
sns.heatmap([salience_scores],
ax=ax1,
annot=[cell_labels] if cell_labels is not None else False,
fmt='',
cmap=cmap,
linewidths=0.5,
square=True,
center=0,
vmin=-1 if normalized else None,
vmax=1 if normalized else None)
sns.heatmap([salience_scores_2],
ax=ax2,
annot=[cell_labels_2] if cell_labels_2 is not None else False,
fmt='',
cmap=cmap,
linewidths=0.5,
square=True,
center=0,
vmin=-1 if normalized else None,
vmax=1 if normalized else None)
else:
        sns.heatmap([salience_scores],
annot=[cell_labels] if cell_labels is not None else False,
fmt='',
linewidths=0.5,
square=True,
cmap=cmap,
center=0,
vmin=-1 if normalized else None,
vmax=1 if normalized else None)
plt.title(title if title is not None else "")
#plt.show()
return plt | 8229db9630c8553567f0f93f8320c71397180ced | 10,631 |
import re
def _cleanse_line(line, main_character):
"""
Cleanse the extracted lines to remove formatting.
"""
# Strip the line, just in case.
line = line.strip()
# Clean up formatting characters.
    line = line.replace('\\', '')                # Remove escape characters.
    line = line.replace('[mc]', main_character)  # Standardize MC name.
    line = re.sub(r'{/?i}', '*', line)           # Convert italics to Markdown.
    line = re.sub(r'{cps=\d+}', '', line)        # Remove scroll-speed formatting.
return line | 87177c557ab89b77c63cc1df10874e52606258a7 | 10,632 |
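A quick illustration with a made-up Ren'Py-style script line; the {i}/{cps} tags and the [mc] placeholder are standard Ren'Py markup.

line = '{cps=30}Hi, [mc]! {i}Nice{/i} to meet you.'
print(_cleanse_line(line, 'Alex'))  # Hi, Alex! *Nice* to meet you.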
from functools import wraps
# Assumed module-level import guard (the original import is not shown):
try:
    import pandapower as pp
except ImportError:
    pp = None
def require_pandapower(f):
"""
Decorator for functions that require pandapower.
"""
@wraps(f)
def wrapper(*args, **kwds):
try:
getattr(pp, '__version__')
except AttributeError:
raise ModuleNotFoundError("pandapower needs to be manually installed.")
return f(*args, **kwds)
return wrapper | 35b0e5a5f9c4e189d849e3a6ba843b6f9e6b49b1 | 10,633 |
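Usage sketch: any function that touches pandapower fails with a clear error when the optional dependency is missing (create_empty_network is the standard pandapower entry point).

@require_pandapower
def build_network():
    return pp.create_empty_network()

# build_network() raises ModuleNotFoundError if pandapower is not installed.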
def optimal_path_fixture():
"""An optimal path, and associated distance, along the nodes of the pyramid"""
return [0, 1, 2, 3], 10 + 2 + 5 | 10c4e436907ecb99740a2514c927f05fd8488cf4 | 10,634 |
import timeit
def evaluate_DynamicHashtablePlusRemove(output=True):
"""
Compare performance using ability in open addressing to mark deleted values.
Nifty trick to produce just the squares as keys in the hashtable.
"""
# If you want to compare, then add following to end of executable statements:
# print([e[0] for e in ht])
tbl = DataTable([8,20,20], ['M', 'Separate Chaining', 'Open Addressing w/ Remove'], output=output)
for size in [512, 1024, 2048]:
linked_list = min(timeit.repeat(stmt='''
ht = Hashtable({0})
N = {0} // 4
for i in range(1, N, 1):
flip_every_k(ht, i, N)'''.format(size), setup='''
from ch03.hashtable_linked import Hashtable
from ch03.challenge import flip_every_k''', repeat=7, number=5))/5
hashtable_plus = min(timeit.repeat(stmt='''
ht = DynamicHashtablePlusRemove({0})
N = {0} // 4
for i in range(1, N, 1):
flip_every_k(ht, i, N)'''.format(size), setup='''
from ch03.hashtable_open import DynamicHashtablePlusRemove
from ch03.challenge import flip_every_k''', repeat=7, number=5))/5
tbl.row([size, linked_list, hashtable_plus])
return tbl | 243b5f4972b8eaa2630b6920c2d640d729feae61 | 10,635 |
def closest(lat1, lon1):
"""Return distance (km) and city closest to given coords."""
lat1, lon1 = float(lat1), float(lon1)
min_dist, min_city = None, None
for city, lat2, lon2 in CITIES:
dist = _dist(lat1, lon1, lat2, lon2)
if min_dist is None or dist < min_dist:
min_dist, min_city = dist, city
return min_dist, min_city | 4227e357f41619b6e2076bdcf3bb67b92daa9c4a | 10,636 |
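closest() relies on module-level CITIES and _dist helpers that are not shown. A plausible sketch of both follows (an assumption, not the original): _dist as the haversine great-circle distance in km, and CITIES as (name, lat, lon) tuples.

import math

def _dist(lat1, lon1, lat2, lon2):
    """Haversine great-circle distance in kilometres (hypothetical helper)."""
    r = 6371.0  # mean Earth radius, km
    p1, p2 = math.radians(lat1), math.radians(lat2)
    dp = math.radians(lat2 - lat1)
    dl = math.radians(lon2 - lon1)
    a = math.sin(dp / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dl / 2) ** 2
    return 2 * r * math.asin(math.sqrt(a))

CITIES = [("Berlin", 52.52, 13.405), ("Paris", 48.8566, 2.3522)]
print(closest(48.9, 2.4))  # -> (small distance, 'Paris')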
def get_previous_term():
"""
Returns a uw_sws.models.Term object,
for the previous term.
"""
url = "{}/previous.json".format(term_res_url_prefix)
return Term(data=get_resource(url)) | a261bc9d744f8f0b70ac76ac596f922b63ea9a46 | 10,637 |
from typing import TypeVar
T = TypeVar("T")
def used(obj: T) -> T:
"""Decorator indicating that an object is being used.
This stops the UnusedObjectFinder from marking it as unused.
"""
_used_objects.add(obj)
return obj | 33d241fe4a0953352ecad2ba306f915a88500d46 | 10,638 |
import numpy as np
import scipy.optimize
def fit_double_gaussian(x_data, y_data, maxiter=None, maxfun=5000, verbose=1, initial_params=None):
""" Fitting of double gaussian
Fitting the Gaussians and finding the split between the up and the down state,
separation between the max of the two gaussians measured in the sum of the std.
Args:
x_data (array): x values of the data
y_data (array): y values of the data
maxiter (int): maximum number of iterations to perform
maxfun (int): maximum number of function evaluations to make
verbose (int): set to >0 to print convergence messages
initial_params (None or array): optional, initial guess for the fit parameters:
[A_dn, A_up, sigma_dn, sigma_up, mean_dn, mean_up]
    Returns:
        par_fit (array): fit parameters of the double gaussian: [A_dn, A_up, sigma_dn, sigma_up, mean_dn, mean_up]
        result_dict (dict): contains the initial parameter guess (either the one
            given to the function or generated by it), the separation between the
            two gaussian means measured in the summed std, and the split value
            that separates the up and the down level
"""
def func(params): return _cost_double_gaussian(x_data, y_data, params)
maxsignal = np.percentile(x_data, 98)
minsignal = np.percentile(x_data, 2)
if initial_params is None:
A_dn = np.max(y_data[:int((len(y_data) / 2))])
A_up = np.max(y_data[int((len(y_data) / 2)):])
sigma_dn = (maxsignal - minsignal) * 1 / 20
sigma_up = (maxsignal - minsignal) * 1 / 20
mean_dn = minsignal + 1 / 4 * (maxsignal - minsignal)
mean_up = minsignal + 3 / 4 * (maxsignal - minsignal)
initial_params = np.array([A_dn, A_up, sigma_dn, sigma_up, mean_dn, mean_up])
par_fit = scipy.optimize.fmin(func, initial_params, maxiter=maxiter, maxfun=maxfun, disp=verbose >= 2)
    # separation is the difference between the gaussian means divided by the sum of both standard deviations
separation = (par_fit[5] - par_fit[4]) / (abs(par_fit[2]) + abs(par_fit[3]))
    # the split point is equally distant from both peaks, measured in each gaussian's std
split = par_fit[4] + separation * abs(par_fit[2])
result_dict = {'parameters initial guess': initial_params, 'separation': separation, 'split': split}
return par_fit, result_dict | 65a54120e2d244301d36d0bba1e25fc711a9d6bb | 10,639 |
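A hedged end-to-end sketch: build a bimodal histogram from synthetic data and fit it. Assumes _cost_double_gaussian is available alongside the function above.

rng = np.random.default_rng(0)
samples = np.concatenate([rng.normal(0.0, 0.1, 4000), rng.normal(1.0, 0.1, 6000)])
y_data, edges = np.histogram(samples, bins=80)
x_data = (edges[:-1] + edges[1:]) / 2

par_fit, result = fit_double_gaussian(x_data, y_data, verbose=0)
print(result['separation'], result['split'])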
def _set_advanced_network_attributes_of_profile(config, profile):
"""
Modify advanced network attributes of profile.
@param config: current configparser configuration.
@param profile: the profile to set the attribute in.
@return: configparser configuration.
"""
config = _set_attribute_of_profile(
config, profile, 'vpc_offering_id', 'VPC offering id', ''
)
return config | f5254f5f055865bf43f0e97f2dcf791bbbe61011 | 10,640 |
def ring_bond_equal(b1, b2, reverse=False):
"""Check if two bonds are equal.
    Two bonds are equal if their beginning and end atoms have the same symbol and
    formal charge. Bond type is not considered because all bonds are aromatic (so SINGLE matches DOUBLE).
Parameters
----------
b1 : rdkit.Chem.rdchem.Bond
An RDKit bond object.
b2 : rdkit.Chem.rdchem.Bond
An RDKit bond object.
reverse : bool
Whether to interchange the role of beginning and end atoms of the second
bond in comparison.
Returns
-------
bool
Whether the two bonds are equal.
"""
b1 = (b1.GetBeginAtom(), b1.GetEndAtom())
if reverse:
b2 = (b2.GetEndAtom(), b2.GetBeginAtom())
else:
b2 = (b2.GetBeginAtom(), b2.GetEndAtom())
return atom_equal(b1[0], b2[0]) and atom_equal(b1[1], b2[1]) | e0c5ab25d69f5770dcf58dd284519b3ed593ad33 | 10,642 |
import warnings as _warnings
def survey_aligned_velocities(od):
"""
Compute horizontal velocities orthogonal and tangential to a survey.
.. math::
(v_{tan}, v_{ort}) = (u\\cos{\\phi} + v\\sin{\\phi},
v\\cos{\\phi} - u\\sin{\\phi})
Parameters
----------
od: OceanDataset
oceandataset used to compute
Returns
-------
ds: xarray.Dataset
| rot_ang_Vel: Angle to rotate geographical
to survey aligned velocities
| tan_Vel: Velocity component tangential to survey
| ort_Vel: Velocity component orthogonal to survey
See Also
--------
subsample.survey_stations
"""
# Check parameters
_check_instance({'od': od}, 'oceanspy.OceanDataset')
if 'station' not in od._ds.dims:
raise ValueError('oceandatasets must be subsampled using'
' `subsample.survey_stations`')
# Get zonal and meridional velocities
var_list = ['lat', 'lon']
try:
# Add missing variables
varList = ['U_zonal', 'V_merid'] + var_list
od = _add_missing_variables(od, varList)
# Extract variables
U = od._ds['U_zonal']
V = od._ds['V_merid']
except Exception as e:
        # Assume U=U_zonal and V=V_merid
_warnings.warn(("\n{}"
"\nAssuming U=U_zonal and V=V_merid."
"\nIf you are using curvilinear coordinates,"
" run `compute.geographical_aligned_velocities`"
" before `subsample.survey_stations`").format(e),
stacklevel=2)
# Add missing variables
varList = ['U', 'V'] + var_list
od = _add_missing_variables(od, varList)
# Extract variables
U = od._ds['U']
V = od._ds['V']
    # Extract variables
lat = _np.deg2rad(od._ds['lat'])
lon = _np.deg2rad(od._ds['lon'])
# Extract grid
grid = od._grid
# Message
print('Computing survey aligned velocities.')
# Compute azimuth
# Translated from matlab:
# https://www.mathworks.com/help/map/ref/azimuth.html
az = _np.arctan2(_np.cos(lat[1:]).values
* _np.sin(grid.diff(lon, 'station')),
_np.cos(lat[:-1]).values * _np.sin(lat[1:]).values
- _np.sin(lat[:-1]).values
* _np.cos(lat[1:]).values
* _np.cos(grid.diff(lon, 'station')))
az = grid.interp(az, 'station', boundary='extend')
az = _xr.where(_np.rad2deg(az) < 0, _np.pi*2 + az, az)
# Compute rotation angle
rot_ang_rad = _np.pi/2 - az
rot_ang_rad = _xr.where(rot_ang_rad < 0,
_np.pi*2 + rot_ang_rad, rot_ang_rad)
rot_ang_deg = _np.rad2deg(rot_ang_rad)
rot_ang_Vel = rot_ang_deg
long_name = 'Angle to rotate geographical to survey aligned velocities'
rot_ang_Vel.attrs['long_name'] = long_name
rot_ang_Vel.attrs['units'] = 'deg (+: counterclockwise)'
# Rotate velocities
tan_Vel = U*_np.cos(rot_ang_rad) + V*_np.sin(rot_ang_rad)
tan_Vel.attrs['long_name'] = 'Velocity component tangential to survey'
if 'units' in U.attrs:
units = U.attrs['units']
else:
units = ' '
tan_Vel.attrs['units'] = ('{} '
'(+: flow towards station indexed'
' with higher number)'
''.format(units))
ort_Vel = V*_np.cos(rot_ang_rad) - U*_np.sin(rot_ang_rad)
ort_Vel.attrs['long_name'] = 'Velocity component orthogonal to survey'
if 'units' in V.attrs:
units = V.attrs['units']
else:
units = ' '
ort_Vel.attrs['units'] = ('{} '
'(+: flow keeps station indexed'
' with higher number to the right)'
''.format(units))
# Create ds
ds = _xr.Dataset({'rot_ang_Vel': rot_ang_Vel,
'ort_Vel': ort_Vel,
'tan_Vel': tan_Vel}, attrs=od.dataset.attrs)
return _ospy.OceanDataset(ds).dataset | c506f8ca5db1ed6045ac02fb1988900f0ae10451 | 10,643 |
def insertion_sort(numbers):
    """
    Worst case this is an O(n^2) algorithm;
    best case (already sorted) it is O(n).
    Sorts the list in place and also returns it.
    """
    for index in range(1, len(numbers)):
current_num = numbers[index]
current_pos = index
while current_pos > 0 and numbers[current_pos - 1] > current_num:
numbers[current_pos] = numbers[current_pos - 1]
current_pos = current_pos - 1
numbers[current_pos] = current_num
return numbers | d32a73b156f8b469cfcbdda70f349c7f3173d6a9 | 10,644 |
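Example run; the sort mutates the list in place and also returns it.

print(insertion_sort([5, 2, 4, 6, 1, 3]))  # [1, 2, 3, 4, 5, 6]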
def ratio_shimenreservoir_to_houchiweir():
"""
Real Name: Ratio ShiMenReservoir To HouChiWeir
Original Eqn: Sum Allocation ShiMenReservoir To HouChiWeir/Sum Allcation From ShiMenReservoir
Units: m3/m3
Limits: (None, None)
Type: component
"""
return sum_allocation_shimenreservoir_to_houchiweir() / sum_allcation_from_shimenreservoir() | e49969f53d6641a02b6cea5d3010ac34eb0739fd | 10,645 |
import torch
from functools import wraps
def torch_profiler_full(func):
"""
A decorator which will run the torch profiler for the decorated function,
printing the results in full.
Note: Enforces a gpu sync point which could slow down pipelines.
"""
@wraps(func)
def wrapper(*args, **kwargs):
with torch.autograd.profiler.profile(use_cuda=True) as prof:
result = func(*args, **kwargs)
print(prof, flush=True)
return result
return wrapper | 7a92eb75d0131c6d151c9908fdcf2e84f6499468 | 10,646 |
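Usage sketch: decorate any function whose op-level timing you want to inspect. Note that use_cuda=True inside the decorator assumes a CUDA-enabled build of PyTorch.

@torch_profiler_full
def matmul_demo():
    a = torch.randn(256, 256)
    return a @ a

matmul_demo()  # prints the full profiler table, then returns the product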
def bias_add(x, bias, data_format=None):
"""Adds a bias vector to a tensor.
# Arguments
x: Tensor or variable.
bias: Bias tensor to add.
data_format: string, `"channels_last"` or `"channels_first"`.
# Returns
Output tensor.
# Raises
ValueError: In one of the two cases below:
1. invalid `data_format` argument.
2. invalid bias shape.
the bias should be either a vector or
a tensor with ndim(x) - 1 dimension
"""
data_format = normalize_data_format(data_format)
bias_shape = int_shape(bias)
if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
raise ValueError('Unexpected bias dimensions %d, '
'expect to be 1 or %d dimensions'
% (len(bias_shape), ndim(x)))
if ndim(x) == 5:
if len(bias_shape) == 1:
new_shape = (1, 1, 1, 1, bias_shape[0])
else:
new_shape = (1,) + bias_shape
new_shape = transpose_shape(new_shape, data_format, spatial_axes=(1, 2, 3))
x += reshape(bias, new_shape)
elif ndim(x) == 4:
if data_format == 'channels_first':
if len(bias_shape) == 1:
if _has_nchw_support():
x = tf.nn.bias_add(x, bias,
data_format='NCHW')
else:
x += reshape(bias, (1, bias_shape[0], 1, 1))
else:
x += reshape(bias, (1, bias_shape[2]) + bias_shape[:2])
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = tf.nn.bias_add(x, bias,
data_format='NHWC')
else:
x += reshape(bias, (1,) + bias_shape)
elif ndim(x) == 3:
if len(bias_shape) == 1:
new_shape = (1, 1, bias_shape[0])
else:
new_shape = (1,) + bias_shape
new_shape = transpose_shape(new_shape, data_format, spatial_axes=(1,))
x += reshape(bias, new_shape)
else:
x = tf.nn.bias_add(x, bias)
return x | 1b783bbd6f685be336b565d7e5db9c5aa91a1f16 | 10,647 |
def get_news_with_follow(request, user_id):
"""
获取用户关注类型的前30条,未登录300未登录
:param request: 请求对象
:return: Json数据
"""
data = {}
try:
user = User.objects.get(pk=user_id)
        follow_set = user.follow_type.values_list('id').all()
follow_list = [x[0] for x in follow_set]
news_set = NewsArticle.objects.filter(type_id__in=follow_list).order_by('-publish_time')[:30]
except db.Error:
data['code'] = 400
        data['msg'] = 'Server busy, please try again later'
return JsonResponse(data)
except ObjectDoesNotExist:
data['code'] = 505
        data['msg'] = 'User does not exist'
return JsonResponse(data)
news_list = []
for news in news_set:
item = {
'id': news.id,
'title': news.title,
'type': news.type.name,
'publish_time': news.publish_time
}
news_list.append(item)
data['code'] = 200
    data['msg'] = 'Request successful'
data['news_list'] = news_list
return JsonResponse(data) | 41e9c8cb20c9c1757a8633d584f738b6a64e4f2b | 10,648 |
def trigger():
"""Trigger salt-api call."""
data = {'foo': 'bar'}
return request('/hook/trigger', data=data) | 6aa469468711c3c94e0b5a20d9825fc9c0a73d83 | 10,650 |
from typing import List
import math
def fitness_function(cams: List[Coord], pop: List[Coord]) -> int:
"""
Function to calculate number of surveilled citizens.
Check if all the cameras can see them, if any can score increases
"""
score = []
for cit in pop:
test = False
for cam in cams:
if (
math.sqrt(((cam[0] - cit[0]) ** 2) + ((cam[1] - cit[1]) ** 2))
<= view_radius
):
test = True
score.append(test)
return score.count(True) | d10c02a7b182a8c38d8db37f13aec5b4c9def593 | 10,651 |
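A self-contained sketch, assuming Coord is an (x, y) tuple and view_radius a module-level constant (neither is defined in the snippet).

import random

view_radius = 10
cams = [(20.0, 20.0), (60.0, 60.0)]
pop = [(random.uniform(0, 100), random.uniform(0, 100)) for _ in range(50)]
print(fitness_function(cams, pop), "citizens surveilled")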
import requests
def scrape_opening_hours():
""""scrape opening hours from https://www.designmuseumgent.be/bezoek"""
r = requests.get("https://www.designmuseumgent.be/bezoek")
data = r.text
return data | 297a35f3bc4e10d453da495e031fae5ce79ca643 | 10,652 |
import numpy as np
import torch
def _demo_mm_inputs(input_shape=(1, 3, 256, 256)):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
"""
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
target = np.zeros([N, 17, H // 32, W // 32], dtype=np.float32)
mask = np.ones([N, H // 32, W // 32], dtype=np.float32)
joints = np.zeros([N, 30, 17, 2], dtype=np.float32)
img_metas = [{
'image_file':
'test.jpg',
'aug_data': [torch.zeros(1, 3, 256, 256)],
'test_scale_factor': [1],
'base_size': (256, 256),
'center':
np.array([128, 128]),
'scale':
np.array([1.28, 1.28]),
'flip_index':
[0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
} for _ in range(N)]
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'target': [torch.FloatTensor(target)],
'mask': [torch.FloatTensor(mask)],
'joints': [torch.FloatTensor(joints)],
'img_metas': img_metas
}
return mm_inputs | abef4e006fe6e530c5ca372904a40eecc3dbb5b7 | 10,653 |
import random
import numpy as np
def compute_one_epoch_baseline():
"""
Function to compute the performance of a simple one epoch baseline.
:return: a line to display (string reporting the experiment results)
"""
best_val_obj_list = []
total_time_list = []
for nb201_random_seed in nb201_random_seeds:
for random_seed in random_seeds:
# randomly sample 256 configurations for the given dataset and NASBench201 seed
# use the same seeds as for our other experiments
random.seed(random_seed)
cfg_list = random.sample(
range(len(df_dict[nb201_random_seed][dataset_name])), 256
)
selected_subset = df_dict[nb201_random_seed][dataset_name].iloc[cfg_list]
# find configuration with the best performance after doing one epoch
max_idx = selected_subset["val_acc_epoch_0"].argmax()
best_configuration = selected_subset.iloc[max_idx]
# find the best validation accuracy of the selected configuration
# as that is the metric that we compare
best_val_obj = best_configuration[epoch_names].max()
# we also need to calculate the time it took for this
# taking into account the number of workers
total_time = selected_subset["eval_time_epoch"].sum() / n_workers
best_val_obj_list.append(best_val_obj)
total_time_list.append(total_time)
line = " & {:.2f} $\pm$ {:.2f}".format(
np.mean(best_val_obj_list), np.std(best_val_obj_list)
)
line += " & {:.1f}h $\pm$ {:.1f}h".format(
np.mean(total_time_list) / 3600, np.std(total_time_list) / 3600
)
line += " & {:.1f}x".format(reference_time / np.mean(total_time_list))
line += " & 1.0 $\pm$ 0.0"
return line | 1bc3b03d49f0bbb8e2213acb31c64367b577aed2 | 10,654 |
import string
import random
def generate_random_string( length ):
"""Generate a random string of a given length containing uppercase and lowercase letters, digits and ASCII punctuation."""
source = string.ascii_lowercase + string.ascii_uppercase + string.digits + string.punctuation
    return ''.join(random.choice(source) for _ in range(length)) | 9bb1ee7e21f27231e498f48bff505d963565f582 | 10,655 |
from collections.abc import Mapping
from collections.abc import Sequence
def pretty_table(rows, header=None):
"""
Returns a string with a simple pretty table representing the given rows.
Rows can be:
- Sequences such as lists or tuples
- Mappings such as dicts
- Any object with a __dict__ attribute (most plain python objects) which is
equivalent to passing the __dict__ directly.
If no header is given then either all or none of the rows must be sequences
to ensure the correct order. If there are no sequences then the header will be
derived from the keys of the mappings.
>>> print(pretty_table([['a', 'hello', 'c', 1], ['world', 'b', 'd', 2]]))
a | hello | c | 1
world | b | d | 2
>>> print(pretty_table([['a', 'hello', 'c', 1], ['world', 'b', 'd', 2]], header='col1 col2 col3 col4'))
col1 | col2 | col3 | col4
---------------------------
a | hello | c | 1
world | b | d | 2
>>> print(pretty_table([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]))
a | b
-----
1 | 2
3 | 4
>>> class C(object):
... def __init__(self, a, b):
... self.a = a
... self.b = b
...
>>> print(pretty_table([{'a': 1, 'b': 2}, C(3, 4), [5, 6]], header=['b', 'a']))
b | a
-----
2 | 1
4 | 3
5 | 6
>>> print(pretty_table([{'a': 1, 'b': 2}, C(3, 4), [5, 6]]))
Traceback (most recent call last):
...
ValueError: Cannot mix sequences and other types of rows without specifying a header
>>> print(pretty_table([[1, 2], [3, 4, 5]]))
Traceback (most recent call last):
...
ValueError: Mismatched lengths.
First row (len = 2):
[1, 2]
Current row (len = 3):
[3, 4, 5]
>>> print(pretty_table([{'a': 1, 'b': 2}], header='c d'))
Traceback (most recent call last):
....
KeyError: "Tried to access 'c', only keys are: ['a', 'b']"
"""
rows2 = []
if header:
header = ensure_list_if_string(header)
rows2.insert(0, header)
row_type = ['any']
else:
header = []
row_type = [None]
def require_type(t):
if row_type[0] not in (None, t, 'any'):
raise ValueError('Cannot mix sequences and other types of rows without specifying a header')
if row_type[0] is None:
row_type[0] = t
def handle_dict(d):
require_type('mapping')
if not header:
header[:] = sorted(d.keys())
rows2.insert(0, header)
return [helpful_error_dict_get(d, key) for key in header]
for row in rows:
if isinstance(row, Mapping):
row = handle_dict(row)
elif isinstance(row, Sequence):
require_type('sequence')
if rows2 and len(row) != len(rows2[0]):
raise ValueError('Mismatched lengths.\n'
'First row (len = %s):\n%s\n'
'Current row (len = %s):\n%s' %
(len(rows2[0]), rows2[0], len(row), row))
else:
row = handle_dict(row.__dict__)
rows2.append(row)
rows = [[str(cell) for cell in row] for row in rows2]
widths = [max(len(row[i]) for row in rows) for i in range(len(rows[0]))]
lines = [' | '.join(cell.ljust(width) for cell, width in zip(row, widths)).strip()
for row in rows]
if header:
lines.insert(1, '-' * len(lines[0]))
return '\n'.join(lines) | 1b4707932b27277ef22f17631e7a5778a38f99eb | 10,657 |
def interpolate_trajectory(world_map, waypoints_trajectory, hop_resolution=1.0):
"""
Given some raw keypoints interpolate a full dense trajectory to be used by the user.
    Args:
        world_map: a reference to the CARLA world map so we can use the planner
        waypoints_trajectory: the current coarse trajectory
        hop_resolution: the resolution; how dense the interpolated trajectory will be
Return:
route: full interpolated route both in GPS coordinates and also in its original form.
"""
dao = GlobalRoutePlannerDAO(world_map, hop_resolution)
grp = GlobalRoutePlanner(dao)
grp.setup()
# Obtain route plan
route = []
for i in range(len(waypoints_trajectory) - 1): # Goes until the one before the last.
waypoint = waypoints_trajectory[i]
waypoint_next = waypoints_trajectory[i + 1]
interpolated_trace = grp.trace_route(waypoint, waypoint_next)
for wp_tuple in interpolated_trace:
route.append((wp_tuple[0].transform, wp_tuple[1]))
return route | df544616954868aaa25c86b50420202bea860d9b | 10,658 |
from pymongo import MongoClient, TEXT
def import_data(filepath="/home/vagrant/countries/NO.txt", mongodb_url="mongodb://localhost:27017"):
"""
Import the adress data into mongodb
CLI Example:
salt '*' mongo.import_data /usr/data/EN.txt
"""
client = MongoClient(mongodb_url)
db = client.demo
address_col = db.address
#Delete collection if present
print("Dropping collection of addresses")
address_col.delete_many({})
#Create compound indices for full text search
address_col.create_index([
("country_code", TEXT),
("postal_code", TEXT),
("place_name", TEXT),
("admin_name1", TEXT),
("admin_name2", TEXT),
("admin_name3", TEXT),
])
# Split line on the tab character since this is the delimiter.
for line in _read_file(filepath):
parts = line.split("\t")
if parts and len(parts) >= 12:
address = {
"country_code": parts[0],
"postal_code": parts[1],
"place_name": parts[2],
"admin_name1": parts[3],
"admin_code1": parts[4],
"admin_name2": parts[5],
"admin_code2": parts[6],
"admin_name3": parts[7],
"admin_code3": parts[8],
"latitude": parts[9],
"longitude": parts[10],
"accuracy": parts[11].strip()
}
            address_col.insert_one(address)
else:
log.error("Element has to few parts to parse")
return "Done importing all data" | 8f80343c60000a8ab988c02bac54e2f748e346b9 | 10,659 |
import async_timeout
import requests
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Enphase Envoy sensor."""
ip_address = config[CONF_IP_ADDRESS]
monitored_conditions = config[CONF_MONITORED_CONDITIONS]
name = config[CONF_NAME]
username = config[CONF_USERNAME]
password = config[CONF_PASSWORD]
_LOGGER.info("Envoy async_setup_platform called")
f = EnvoyReaderFactory(host=ip_address, username=username, password=password)
# The factory will return a reader based on the SW/FW version found in info.xml
envoy_reader = await f.get_reader()
entities = []
async def async_update_data():
try:
async with async_timeout.timeout(10):
return await envoy_reader.get_data()
except requests.exceptions.HTTPError as err:
raise UpdateFailed(f"Error communicating with API: {err}")
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="EnphaseEnvoy",
update_method=async_update_data,
        update_interval=timedelta(seconds=30),
)
# Do an initial data collection so the list with inverters is filled
await coordinator.async_refresh()
# Iterate through the list of sensors configured
for condition in monitored_conditions:
if condition == "inverters":
# The initial data collection made sure we know all inverters that are available at this point
for inverter in coordinator.data['inverters']:
entities.append(
EnvoyInverter(
coordinator,
inverter['serial_number'],
envoy_reader,
condition,
f"{name}{SENSORS[condition][0]} {inverter['serial_number']}",
SENSORS[condition][1],
SENSORS[condition][2],
SENSORS[condition][3]
)
)
else:
entities.append(
Envoy(
coordinator,
coordinator.data['serial_number'],
envoy_reader,
condition,
f"{name}{SENSORS[condition][0]}",
SENSORS[condition][1],
SENSORS[condition][2],
SENSORS[condition][3]
)
)
async_add_entities(entities) | 07e762a8fbcc987b57d38bc8a10d3f51e6fa58a4 | 10,660 |
from .register import PIPING_SIGNS
from .verb import Verb
import ast
def _get_piping_verb_node(calling_node: ast.Call) -> ast.Call:
"""Get the ast node that is ensured the piping verb call
Args:
calling_node: Current Call node
Returns:
The verb call node if found, otherwise None
"""
# check if we have the piping node (i.e. >>)
child = calling_node
parent = getattr(child, "parent", None)
token = PIPING_SIGNS[Verb.CURRENT_SIGN].token
while parent:
if (
# data >> verb(...)
(isinstance(parent, ast.BinOp) and parent.right is child)
or
# data >>= verb(...)
(isinstance(parent, ast.AugAssign) and parent.value is child)
) and isinstance(parent.op, token):
return child
child = parent
parent = getattr(parent, "parent", None)
return None | 2f6be9b382f2bf2e31d39ff9682f5b26618aa1af | 10,661 |
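The walk above assumes each AST node carries a .parent attribute, which the stdlib ast module does not set; callers are expected to attach it in a pre-pass. A minimal sketch:

import ast

def attach_parents(tree: ast.AST) -> ast.AST:
    """Annotate every node with a .parent attribute (the root gets none)."""
    for node in ast.walk(tree):
        for child in ast.iter_child_nodes(node):
            child.parent = node
    return tree

tree = attach_parents(ast.parse("result = data >> verb(1)"))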
def slot(**kwargs):
"""Creates a SlotConfig instance based on the arguments.
Args:
**kwargs: Expects the following keyed arguments.
in_dist: Distribution for inbound in msec. Optional
in_max_bytes: Optional. Ignored when in_dist is missing.
in_max_pkts: Optional. Ignored when in_dist is missing.
out_dist: Distribution for outbound in msec. Optional
At least one of in_dist and out_dist must be available.
out_max_bytes: Optional. Ignored when out_dist is missing.
out_max_pkts: Optional. Ignored when out_dist is missing.
Returns:
The SlotConfig instance.
Raises:
ValueError: When both in_dist and out_dist are missing.
When an unexpected key is passed.
"""
expected_keys = {'in_dist', 'in_max_bytes', 'in_max_pkts', 'out_dist',
'out_max_bytes', 'out_max_pkts'}
    if set(kwargs) - expected_keys:
raise ValueError('unexpected args: %s' %
','.join(set(kwargs) - expected_keys))
in_slot = None
out_slot = None
if 'in_dist' in kwargs:
in_slot = Slot(
kwargs['in_dist'],
kwargs['in_max_bytes'] if 'in_max_bytes' in kwargs else 0,
kwargs['in_max_pkts'] if 'in_max_pkts' in kwargs else 0)
if 'out_dist' in kwargs:
out_slot = Slot(
kwargs['out_dist'],
kwargs['out_max_bytes'] if 'out_max_bytes' in kwargs else 0,
kwargs['out_max_pkts'] if 'out_max_pkts' in kwargs else 0)
if not bool(in_slot or out_slot):
raise ValueError('in_dist or out_dist must be defined')
return SlotConfig(in_slot, out_slot) | 4b26f7a805b88a7a6bd03f7d23db7a14d7979eeb | 10,662 |
def get_agent_type(player):
""" Prompts user for info as to the type of agent to be created """
print('There are two kinds of Agents you can initialise.')
print(' 1 - <Human> - This would be a totally manually operated agent.')
print(' You are playing the game yourself.')
print(' 2 - <Random> - This is an agent who simply makes totally random moves.')
print(' They select from the set of all legal moves.')
# print(' 3 - <Engine> - This is an agent which selects moves on the basis of some')
# print(' pre-programmed algorithm.')
print(f'\nWhich type of agent should {player} be?')
while True:
result = input(' : ')
if result.isalpha(): # check response is all letters
result = result.lower() # make them all lowercase
if result.lower() == 'human':
agent_type = result.capitalize()
break
elif result.lower() == 'random':
agent_type = result.capitalize()
break
# elif result.lower() == 'engine':
# not_implemented('Engine')
# continue
elif result.lower() in ('close', 'quit', 'exit', 'no'):
exit_program()
elif result.isnumeric():
if result == '1':
agent_type = 'Human'
break
elif result == '2':
agent_type = 'Random'
break
# elif result == '3':
# not_implemented('Engine')
# continue
agent_name = player
print(f'And their name? Typing nothing will use the default name: {player}')
while True:
result = input(' : ')
if result == '':
break
elif result.isalnum():
if result.lower() in ('close', 'quit', 'exit', 'no'):
exit_program()
agent_name = result
break
else:
print('\n Can only include letters or numbers.\n')
return agent_type, agent_name | 3d0fce9faafaa6c993cb2b5b54a1480268c22ab3 | 10,663 |
def _try_match_and_transform_pattern_1(reduce_op, block) -> bool:
"""
Identify the pattern:
y = gamma * (x - mean) / sqrt(variance + epsilon) + beta
y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)])
x --> reduce_mean --> sub --> square --> reduce_mean --> add(epsilon) --> rsqrt
| | ^ |
| | | V
|----------------------- mul (gamma)
| | |
| | --------|---------
| | | |
| | | V
| |----------------------------------------------------------------> mul
| | |
| V |
|--------------------------------------------------------------> mul |
| V
| sub (beta) --> add --> [...]
| ^
|-------------------------------
This pattern corresponds to either layer_norm or instance_norm.
It is instance_norm if all of the following are true:
- input is rank 4
- axes of reduce_mean is [-2, -1] or [-3, -2]
(when [-3, -2], a channel first to channel last transpose would be inserted)
- gamma and beta are rank 1, after squeeze
It is layer_norm if all of the following are true:
- axes is either [-1] or [-1, -2] or [-1, -2, -3] and so on
- rank of gamma and beta is equal to the length of the axes
"""
ops_to_remove = []
root_var = reduce_op.x
if root_var.shape is None:
return False
# check that root_var feeds into exactly 3 ops
if len(list(root_var.child_ops)) != 3:
return False
if root_var.op is not None and not _check_child_op_types(
root_var.op, child_op_types=["reduce_mean", "sub", "mul"]
):
return False
# check 1st reduce_mean op
if not _check_reduce_op(reduce_op):
return False
ops_to_remove.append(reduce_op)
# check 1st sub op
if not _check_child_op_types(reduce_op, ["sub", "mul"], check_order=False):
return False
child_ops_reduce_mean = list(reduce_op.outputs[0].child_ops)
op_a = child_ops_reduce_mean[0]
op_b = child_ops_reduce_mean[1]
sub_op1 = op_a if op_a.op_type == "sub" else op_b
if not (sub_op1.x == root_var and sub_op1.y == reduce_op.outputs[0]):
return False
ops_to_remove.append(sub_op1)
# check square op
square_op = _try_get_child_op_type(sub_op1, "square")
if square_op is None:
return False
ops_to_remove.append(square_op)
# check second reduce mean
reduce_op2 = _try_get_child_op_type(square_op, "reduce_mean")
if not _check_reduce_op(reduce_op2):
return False
ops_to_remove.append(reduce_op2)
# check add op (with epsilon)
add_op1 = _try_get_child_op_type(reduce_op2, "add")
if add_op1 is None:
return False
epsilon_var = add_op1.y if add_op1.x == reduce_op2.outputs[0] else add_op1.x
if epsilon_var.val is None or len(epsilon_var.val.shape) != 0:
return False # must be scalar
ops_to_remove.append(add_op1)
# check rsqrt
rsqrt_op = _try_get_child_op_type(add_op1, "rsqrt")
if rsqrt_op is None:
return False
ops_to_remove.append(rsqrt_op)
# check mul (gamma)
mul_op1 = _try_get_child_op_type(rsqrt_op, "mul")
if mul_op1 is None:
return False
gamma_var = mul_op1.y if mul_op1.x == rsqrt_op.outputs[0] else mul_op1.x
if gamma_var.val is None:
return False
ops_to_remove.append(mul_op1)
# check 2 muls after the gamma mul
if not _check_child_op_types(mul_op1, ["mul", "mul"]):
return False
child_ops = list(mul_op1.outputs[0].child_ops)
mul_op2 = child_ops[0]
mul_op3 = child_ops[1]
mul_op2_other_var = mul_op2.x if mul_op2.y == mul_op1.outputs[0] else mul_op2.y
mul_op3_other_var = mul_op3.x if mul_op3.y == mul_op1.outputs[0] else mul_op3.y
if not (
(mul_op2_other_var == root_var and mul_op3_other_var == reduce_op.outputs[0])
or (mul_op2_other_var == reduce_op.outputs[0] and mul_op3_other_var == root_var)
):
return False
if mul_op2_other_var == root_var:
mul_root_op = mul_op2
mul_mean_op = mul_op3
else:
mul_root_op = mul_op3
mul_mean_op = mul_op2
ops_to_remove.append(mul_mean_op)
ops_to_remove.append(mul_root_op)
# check sub with beta
sub_op2 = _try_get_child_op_type(mul_mean_op, "sub")
if sub_op2 is None:
return False
if sub_op2.y != mul_mean_op.outputs[0]:
return False
beta_var = sub_op2.x
if beta_var.val is None:
return False
ops_to_remove.append(sub_op2)
# check last add op
add_op2 = _try_get_child_op_type(sub_op2, "add")
if add_op2 is None:
return False
if not (add_op2.x == mul_root_op.outputs[0] or add_op2.y == mul_root_op.outputs[0]):
return False
ops_to_remove.append(add_op2)
return _try_apply_transform(
reduce_op, block, gamma_var, beta_var, epsilon_var, add_op2, ops_to_remove
) | f1baecfc53daf731c5b518aadcc11c88508258e3 | 10,664 |
import click
def cli_resize(maxsize):
"""Resize images to a maximum side length preserving aspect ratio."""
click.echo("Initializing resize with parameters {}".format(locals()))
def _resize(images):
for info, image in images:
yield info, resize(image, maxsize)
return _resize | f0695940531c45a88ff1722c002dacc6103962e0 | 10,665 |
import numpy
def _fetch_object_array(cursor):
"""
_fetch_object_array() fetches arrays with a basetype that is not considered
scalar.
"""
arrayShape = cursor_get_array_dim(cursor)
# handle a rank-0 array by converting it to
# a 1-dimensional array of size 1.
if len(arrayShape) == 0:
arrayShape.append(1)
# now create the (empty) array of the correct type and shape
array = numpy.empty(dtype=object,shape=arrayShape)
    # go to the first element
cursor_goto_first_array_element(cursor)
# loop over all elements excluding the last one
arraySizeMinOne = array.size - 1
for i in range(arraySizeMinOne):
array.flat[i] = _fetch_subtree(cursor)
cursor_goto_next_array_element(cursor)
    # final element, then back to parent scope
array.flat[arraySizeMinOne] = _fetch_subtree(cursor)
cursor_goto_parent(cursor)
return array | 7c84306c0b84a126f401e51bac5896203357380a | 10,666 |
def sldParse(sld_str):
"""
Builds a dictionary from an SldStyle string.
"""
    # Normalize quotes: single quotes and (assumed) escaped double quotes become plain "
    sld_str = sld_str.replace("'", '"').replace('\\"', '"')
keys = ['color', 'label', 'quantity', 'opacity']
items = [el.strip() for el in sld_str.split('ColorMapEntry') if '<RasterSymbolizer>' not in el]
sld_items = []
for i in items:
tmp = {}
for k in keys:
v = find_between(i, f'{k}="', '"')
if v: tmp[k] = v
sld_items.append(tmp)
return {
'type': find_between(sld_str, 'type="', '"'),
'extended': find_between(sld_str, 'extended="', '"'),
'items': sld_items
} | 888a2ee3251a0d1149b478d32ccb88ff0e309ec3 | 10,667 |
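sldParse depends on a find_between helper that is not shown; a plausible sketch (an assumption) plus a sample call:

def find_between(s, first, last):
    """Return the substring of s between the first occurrences of first and last."""
    try:
        start = s.index(first) + len(first)
        return s[start:s.index(last, start)]
    except ValueError:
        return ''

sld = ('<RasterSymbolizer><ColorMap type="ramp" extended="false">'
       '<ColorMapEntry color="#ff0000" quantity="0" opacity="1"/></ColorMap>')
print(sldParse(sld))
# {'type': 'ramp', 'extended': 'false', 'items': [{'color': '#ff0000', 'quantity': '0', 'opacity': '1'}]}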
def x_ideal(omega, phase):
"""
Generates a complex-exponential signal with given frequency
and phase. Does not contain noise
"""
x = np.empty(cfg.N, dtype=np.complex_)
for n in range(cfg.N):
z = 1j*(omega * (cfg.n0+n) * cfg.Ts + phase)
x[n] = cfg.A * np.exp(z)
return x | 87e4df7cbbfe698e5deb461642de72efb6bfffad | 10,668 |
def _wrap_stdout(outfp):
"""
Wrap a filehandle into a C function to be used as `stdout` or
`stderr` callback for ``set_stdio``. The filehandle has to support the
write() and flush() methods.
"""
def _wrap(instance, str, count):
outfp.write(str[:count])
outfp.flush()
return count
return c_stdstream_call_t(_wrap) | f7d773890b17b18855d2d766bd147c67ac7ade3b | 10,669 |
def svn_fs_apply_textdelta(*args):
"""
svn_fs_apply_textdelta(svn_fs_root_t root, char path, char base_checksum,
char result_checksum, apr_pool_t pool) -> svn_error_t
"""
return _fs.svn_fs_apply_textdelta(*args) | d8d228415d8768ec297415a42113e0eb2463163f | 10,670 |
def find(x):
"""
Find the representative of a node
"""
if x.instance is None:
return x
else:
# collapse the path and return the root
x.instance = find(x.instance)
return x.instance | 5143e9d282fb1988d22273996dae36ed587bd9d2 | 10,671 |
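find() expects nodes whose .instance attribute points at their representative (None at a root), as in a union-find structure. A tiny self-contained sketch:

class Node:
    def __init__(self, name):
        self.name = name
        self.instance = None  # None: this node is its own representative

a, b, c = Node("a"), Node("b"), Node("c")
b.instance = a  # union: b's representative is a
c.instance = b  # chain: c -> b -> a

print(find(c).name)     # a
print(c.instance is a)  # True: the path was collapsed directly onto the root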
def convert_shape(node, **kwargs):
"""Map MXNet's shape_array operator attributes to onnx's Shape operator
and return the created node.
"""
return create_basic_op_node('Shape', node, kwargs) | 7d4414eac78208b0c35d7ab5a9f21ab70a0947ae | 10,672 |
import time
def get_timestamp(prev_ts=None):
"""Internal helper to return a unique TimeStamp instance.
If the optional argument is not None, it must be a TimeStamp; the
return value is then guaranteed to be at least 1 microsecond later
the argument.
"""
t = time.time()
t = TimeStamp(*time.gmtime(t)[:5] + (t % 60,))
if prev_ts is not None:
t = t.laterThan(prev_ts)
return t | 89751c53679f11efd26b88609887c4a2ed475418 | 10,674 |
import lxml.etree
def get_element_as_string(element):
    """
    Serialize an lxml etree element to a pretty-printed string.
    :param element: the element to serialize
    :return: the serialized XML as a str
    """
    return lxml.etree.tostring(element, pretty_print=True).decode() | f62945ff4bdd3bea2562ba52a89d8d01c74e0b10 | 10,675 |
def _select_ports(count, lower_port, upper_port):
"""Select and return n random ports that are available and adhere to the given port range, if applicable."""
ports = []
sockets = []
for i in range(count):
sock = _select_socket(lower_port, upper_port)
ports.append(sock.getsockname()[1])
sockets.append(sock)
for sock in sockets:
sock.close()
return ports | 2f92cb7e4ab26c54bc799369cd950c4269049291 | 10,677 |
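_select_ports defers to a _select_socket helper that is not shown; a plausible sketch (an assumption, not the original) binds to port 0 for "any free port" or retries random ports inside the requested range:

import random
import socket

def _select_socket(lower_port, upper_port):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if not lower_port and not upper_port:
        sock.bind(("", 0))  # let the OS pick any free port
        return sock
    while True:
        try:
            sock.bind(("", random.randint(lower_port, upper_port)))
            return sock
        except OSError:
            continue  # port in use; try another in the range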
def is_solution_quad(var, coeff, u, v):
"""
Check whether `(u, v)` is solution to the quadratic binary diophantine
equation with the variable list ``var`` and coefficient dictionary
``coeff``.
Not intended for use by normal users.
"""
reps = dict(zip(var, (u, v)))
eq = Add(*[j*i.xreplace(reps) for i, j in coeff.items()])
return _mexpand(eq) == 0 | b19d7678c725a41df755352f5af1ce322f3efad7 | 10,678 |
def is_callable(x):
"""Tests if something is callable"""
return callable(x) | 72584deb62ac5e34e69325466236792c5299a51b | 10,679 |
def validate_version_argument(version, hint=4):
""" validate the version argument against the supported MDF versions. The
default version used depends on the hint MDF major revision
Parameters
----------
version : str
requested MDF version
hint : int
MDF revision hint
Returns
-------
valid_version : str
valid version
"""
if version not in SUPPORTED_VERSIONS:
if hint == 2:
valid_version = "2.14"
elif hint == 3:
valid_version = "3.30"
else:
valid_version = "4.10"
message = (
'Unknown mdf version "{}".'
" The available versions are {};"
' automatically using version "{}"'
)
message = message.format(version, SUPPORTED_VERSIONS, valid_version)
logger.warning(message)
else:
valid_version = version
return valid_version | e29342f78236f043b079cf5e4473f6dccb29d35c | 10,680 |
import torch
import sklearn.metrics as skl
def roc_auc(probs, labels):
"""
    Computes the area under the receiver operating characteristic between output probs
and labels for k classes.
Source: https://github.com/HazyResearch/metal/blob/master/metal/utils.py
args:
probs (tensor) (size, k)
labels (tensor) (size, 1)
"""
probs = torch.nn.functional.softmax(probs, dim=1)
probs = probs.numpy()
# Convert labels to one-hot indicator format, using the k inferred from probs
labels = hard_to_soft(labels, k=probs.shape[1]).numpy()
return skl.roc_auc_score(labels, probs) | 9ae79a4ff5cbf93d2187857c8ac62014c6fa98f0 | 10,681 |
# NOTE: `Collection` below is the app's Django model (import elided here);
# typing.Collection must not be imported in its place, or the query breaks.
def show_collection(request, collection_id):
"""Shows a collection"""
collection = get_object_or_404(Collection, pk=collection_id)
# New attribute to store the list of problems and include the number of submission in each problem
collection.problem_list = collection.problems()
for problem in collection.problem_list:
problem.num_submissions = problem.num_submissions_by_user(request.user)
problem.solved = problem.solved_by_user(request.user)
return render(request, 'collection.html', {'collection': collection}) | a23efc449258839a7d7bfa0c0a73d889a6891a0f | 10,682 |
from typing import Union
from typing import Callable
from typing import List
import numpy as np
def alpha(
data: np.ndarray,
delta: Union[Callable[[int, int], float], List[List[float]], str] = "nominal",
):
"""Calculates Krippendorff's alpha coefficient [1, sec. 11.3] for
inter-rater agreement.
[1] K. Krippendorff, Content analysis: An introduction to its
methodology. Sage publications, 2004.
Args:
-----
data: numpy.ndarray
The data matrix, shape (n_raters, n_units). Each cell (i, j)
represents the value assigned to unit j by rater i, or 0
representing no response.
delta: callable, 2-D array-like or str
The delta metric. Default is the nominal metric, which takes the
value 1 in case c != k and 0 otherwise.
"""
# The following implementation was based off the Wikipedia article:
# https://en.wikipedia.org/wiki/Krippendorff%27s_alpha
# Response categories go from 1 to R, 0 represents no response
R = np.max(data)
counts = np.apply_along_axis(lambda x: np.bincount(x, minlength=R + 1), 0, data).T
count_sum = np.sum(counts, 0)
assert len(count_sum) == R + 1
def ordinal(c: int, k: int):
if k < c:
c, k = k, c
s = (
sum(count_sum[g] for g in range(c, k + 1))
- (count_sum[c] + count_sum[k]) / 2
)
return s ** 2
if isinstance(delta, str):
delta = {
"nominal": Deltas.nominal,
"ordinal": ordinal,
"interval": Deltas.interval,
}[delta]
    if not callable(delta):
        try:
            delta[0][0]
        except (TypeError, IndexError):
            raise TypeError("delta must be either str, callable or 2D array.")
        # Bind the matrix to a new name first: rebinding `delta` below would
        # otherwise make the closure index into itself.
        delta_matrix = delta
        def _delta(c, k):
            return delta_matrix[c][k]
        delta = _delta
m_u = np.sum(counts[:, 1:], 1)
valid = m_u >= 2
counts = counts[valid]
m_u = m_u[valid]
data = data[:, valid]
n = np.sum(m_u)
n_cku = np.matmul(counts[:, :, None], counts[:, None, :])
for i in range(R + 1):
n_cku[:, i, i] = counts[:, i] * (counts[:, i] - 1)
D_o = 0
for c in range(1, R + 1):
for k in range(1, R + 1):
D_o += delta(c, k) * n_cku[:, c, k]
D_o = np.sum(D_o / (n * (m_u - 1)))
D_e = 0
    P_ck = np.bincount(data.flat, minlength=R + 1)  # keep length R+1 even if a category is absent
for c in range(1, R + 1):
for k in range(1, R + 1):
D_e += delta(c, k) * P_ck[c] * P_ck[k]
D_e /= n * (n - 1)
return 1 - D_o / D_e | 98c86120287d9d4b2c7f10ad074702c2088ade8d | 10,683 |
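# Hedged usage sketch for `alpha`: two raters, four units, categories 1..2,
# with 0 meaning "no response". An explicit 2-D delta matrix is passed so the
# example does not depend on the elided `Deltas` helper class.
raters = np.array([[1, 2, 1, 0],
                   [1, 2, 2, 2]])
nominal_delta = [[0 if c == k else 1 for k in range(3)] for c in range(3)]
agreement = alpha(raters, delta=nominal_delta)  # values near 1 mean strong agreement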
def enforce(action, target, creds, do_raise=True):
"""Verifies that the action is valid on the target in this context.
:param creds: user credentials
:param action: string representing the action to be checked, which
should be colon separated for clarity.
Or it can be a Check instance.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary
representing the location of the object e.g.
{'project_id': object.project_id}
:param rule_dict: instance of oslo_policy.policy.Rules, it's
actually a dict, with keys are the actions
to be protected and values are parsed Check trees.
:raises: `exception.Forbidden` if verification fails.
Actions should be colon separated for clarity. For example:
* identity:list_users
"""
init()
# Add the exception arguments if asked to do a raise
extra = {}
if do_raise:
extra.update(exc=exception.ForbiddenAction, service=action[0],
permission=action[1], do_raise=do_raise)
return _ENFORCER.enforce(action, target, creds, **extra) | 16cdefe38bfc56f529a735b8517d94ade7db780d | 10,684 |
from operator import and_
from operator import or_
def query_data(session, agency_code, period, year):
""" Request A file data
Args:
session: DB session
agency_code: FREC or CGAC code for generation
period: The period for which to get GTAS data
year: The year for which to get GTAS data
Returns:
The rows using the provided dates for the given agency.
"""
# set a boolean to determine if the original agency code is frec or cgac
frec_provided = len(agency_code) == 4
tas_gtas = tas_gtas_combo(session, period, year)
# Make a list of FRECs to compare to for 011 AID entries
frec_list = []
if not frec_provided:
frec_list = session.query(FREC.frec_code).select_from(outerjoin(CGAC, FREC, CGAC.cgac_id == FREC.cgac_id)).\
filter(CGAC.cgac_code == agency_code).all()
# Group agencies together that need to be grouped
agency_array = []
if agency_code == '097':
agency_array = ['017', '021', '057', '097']
elif agency_code == '1601':
agency_array = ['1601', '016']
elif agency_code == '1125':
agency_array = ['1125', '011']
# Save the ATA filter
agency_filters = []
if not agency_array:
agency_filters.append(tas_gtas.c.allocation_transfer_agency == agency_code)
else:
agency_filters.append(tas_gtas.c.allocation_transfer_agency.in_(agency_array))
# Save the AID filter
if agency_code == '097' and not frec_provided:
agency_filters.append(and_(tas_gtas.c.allocation_transfer_agency.is_(None),
tas_gtas.c.agency_identifier.in_(agency_array)))
elif not frec_provided:
agency_filters.append(and_(tas_gtas.c.allocation_transfer_agency.is_(None),
tas_gtas.c.agency_identifier == agency_code))
else:
agency_filters.append(and_(tas_gtas.c.allocation_transfer_agency.is_(None),
tas_gtas.c.fr_entity_type == agency_code))
# If we're checking a CGAC, we want to filter on all of the related FRECs for AID 011, otherwise just filter on
# that FREC
if frec_list:
agency_filters.append(and_(tas_gtas.c.allocation_transfer_agency.is_(None),
tas_gtas.c.agency_identifier == '011',
tas_gtas.c.fr_entity_type.in_(frec_list)))
elif not frec_provided:
agency_filters.append(and_(tas_gtas.c.allocation_transfer_agency.is_(None),
tas_gtas.c.agency_identifier == '011',
tas_gtas.c.fr_entity_type == agency_code))
rows = initial_query(session, tas_gtas.c).\
filter(func.coalesce(tas_gtas.c.financial_indicator2, '') != 'F').\
filter(or_(*agency_filters)).\
group_by(tas_gtas.c.allocation_transfer_agency,
tas_gtas.c.agency_identifier,
tas_gtas.c.beginning_period_of_availa,
tas_gtas.c.ending_period_of_availabil,
tas_gtas.c.availability_type_code,
tas_gtas.c.main_account_code,
tas_gtas.c.sub_account_code)
return rows | 0eb856f699eebf95bf10ff2d3dd6c9a72ec0843a | 10,685 |
def vocabulary(vec_docs):
    """ vocabulary(vec_docs) -> tuple: (float avg_doc_len, corpus vocabulary dict {"word": num_docs_containing_term, ...})
    vec_docs = dict of documents {ID: [list, of, words], ...}; its values are
    replaced in place by per-document word-count dicts.
    """
vocabulary = {}
    count_vec = [] # doc lengths, aggregated to determine avg_doc_len
#Extract len of docs anonymously, convert vec_docs values to c(w,d), Create corups Vocabulary as c(d,w)
for key,value in vec_docs.items(): #recall: {key = "doc_ID": value = [list, of, words, in, each, document]}
doc_words = {}
count_vec.append(len(value))
for word in value:
#convert doc word list into dict storing c(w,d) ∈ D
if word in doc_words:
doc_words[word] = doc_words[word] + 1
else:
doc_words[word] = 1
        #Next, create vocabulary c(d,w) ∈ Corpus
for word,count in doc_words.items():
if word in vocabulary:
vocabulary[word] = vocabulary[word] + 1
else:
vocabulary[word] = 1
#last convert {ID:[list,of,words]} -> {ID: {dict:1,of:1,word:1,counts:2} }
vec_docs[key] = doc_words
avg_dl = sum(count_vec) / len(count_vec)
return (avg_dl,vocabulary) | 4e6f4df1e36c2fdf3d7d1d20750d74f91a0214b6 | 10,686 |
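# Hedged usage sketch for `vocabulary`: two tiny "documents" keyed by ID. The
# values of vec_docs are replaced in place by per-document word counts.
vec_docs = {"d1": ["apple", "pear", "apple"], "d2": ["pear"]}
avg_dl, vocab = vocabulary(vec_docs)
assert avg_dl == 2.0                          # (3 + 1) / 2 documents
assert vocab == {"apple": 1, "pear": 2}       # documents containing each word
assert vec_docs["d1"] == {"apple": 2, "pear": 1}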
import warnings
def _build_type(type_, value, property_path=None):
""" Builds the schema definition based on the given type for the given value.
:param type_: The type of the value
:param value: The value to build the schema definition for
:param List[str] property_path: The property path of the current type,
defaults to None, optional
:return: The built schema definition
:rtype: Dict[str, Any]
"""
if not property_path:
property_path = []
for (type_check, builder) in (
(is_enum_type, _build_enum_type),
(is_null_type, _build_null_type),
(is_bool_type, _build_bool_type),
(is_string_type, _build_string_type),
(is_integer_type, _build_integer_type),
(is_number_type, _build_number_type),
(is_array_type, _build_array_type),
(is_object_type, _build_object_type),
):
if type_check(type_):
return builder(value, property_path=property_path)
# NOTE: warning ignores type None (as that is the config var default)
if type_:
warnings.warn(f"unhandled translation for type {type_!r} with value {value!r}")
return {} | 7033e5f8bc5cd5b667f25b8554bfe3296191762f | 10,688 |
import numpy as np
def lidar_2darray_to_rgb(array: np.ndarray) -> np.ndarray:
    """Returns a `NumPy` array (image) from a 2-channel LIDAR point cloud.
Args:
array: The original LIDAR point cloud array.
Returns:
The `PyGame`-friendly image to be visualized.
"""
# Get array shapes.
W, H, C = array.shape
assert C == 2
# Select channel.
img = np.c_[array, np.zeros(shape=(W, H, 1))]
    # Scale to the 0-255 range (values remain floats).
img = 255 * (img / img.max())
return img | 69e2de793b9280b269ac8ab9f3d313e51c932c8c | 10,689 |
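# Hedged usage sketch for `lidar_2darray_to_rgb`: a random 2-channel array is
# padded to three channels and scaled to the 0-255 range.
array = np.random.rand(64, 64, 2)
img = lidar_2darray_to_rgb(array)
assert img.shape == (64, 64, 3)
assert img.max() == 255.0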
from typing import Tuple
from typing import Union
from typing import List
from typing import Dict
from tqdm import tqdm
import torch
from torch import Tensor
from torch.utils.data import DataLoader
def rollout(dataset: RPDataset,
env: RPEnv,
policy: Policy,
batch_size: int,
num_workers: int = 4,
disable_progress_bar: bool = False,
**kwargs) -> Tuple[Tensor, Union[List, Dict]]:
"""Policy evaluation rollout
Args:
dataset: dataset to evaluate on
env: the routing simulation environment
policy: policy model
batch_size: size of mini-batches
num_workers: num cores to distribute data loading
disable_progress_bar: flag to disable tqdm progress bar
    Returns:
        tuple of (tensor of final costs per instance, list of per-batch infos)
    """
costs, infos = [], []
for batch in tqdm(
DataLoader(
dataset,
batch_size=batch_size,
num_workers=num_workers,
collate_fn=lambda x: x, # identity -> returning simple list of instances
shuffle=False # do not random shuffle data in rollout!
),
disable=disable_progress_bar,
):
with torch.no_grad():
cost, info = eval_episode(batch, env, policy, **kwargs)
costs.append(cost.cpu())
infos.append(info)
env.clear_cache()
return torch.cat(costs, dim=0), infos | 1b88ff01b86d567de2c78d4450825e4fd1120311 | 10,691 |
def regex_ignore_case(term_values):
"""
turn items in list "term_values" to regexes with ignore case
"""
    output = [r'(?i)' + item for item in term_values]
return output | 5dbf5fba758fe91fb0bbfaed6ab3cfa5f05357eb | 10,692 |
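# Hedged usage sketch for `regex_ignore_case`:
assert regex_ignore_case(['foo', 'Bar']) == ['(?i)foo', '(?i)Bar']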
from typing import Callable
import numpy as np
def importance_sampling_integrator(function: Callable[..., np.ndarray],
                                   pdf: Callable[..., np.ndarray],
                                   sampler: Callable[..., np.ndarray],
                                   n: int = 10000,
                                   seed: int = 1
                                   ) -> np.ndarray:
    """
    Estimate an integral by importance sampling.
    Parameters
    ----------
    function : callable
        The integrand; evaluated at the drawn samples.
    pdf : callable
        Probability density of the sampling distribution.
    sampler : callable
        Draws ``n`` samples from the sampling distribution.
    n : int, optional
        Number of samples. The default is 10000.
    seed : int, optional
        Random seed. The default is 1.
    Returns
    -------
    numpy.ndarray
        One integral estimate per row of ``function``'s output.
    """
# Set a random seed.
np.random.seed(seed)
# Generate n samples from the probability distribution.
samples = sampler(n)
# Evaluate the function at the samples and divide by the probability
# density of the distribution at those samples.
sampled_values = function(samples) / pdf(samples)
# Add the estimate of the integral to the estimates list.
estimates = np.mean(sampled_values, axis=1) # Altered this for the batching.
# Return the mean of the estimates as the estimate of the integral.
return np.array(estimates) | 14b6abfaa38f37430ec0e9abcd330f1392543978 | 10,693 |
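# Hedged usage sketch for `importance_sampling_integrator`: estimate the
# integral of exp(-x^2) over the real line (equal to sqrt(pi)) by sampling
# from a standard normal. The integrand returns shape (1, n) because the
# snippet averages over axis 1.
from scipy.stats import norm

estimate = importance_sampling_integrator(
    function=lambda x: np.exp(-x ** 2)[None, :],
    pdf=norm.pdf,
    sampler=np.random.standard_normal,
    n=50_000,
)
# estimate[0] should be close to np.sqrt(np.pi) ~= 1.7725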
from apex.amp._amp_state import _amp_state
import torch
from torch import autograd
def r1_gradient_penalty_loss(discriminator,
real_data,
mask=None,
norm_mode='pixel',
loss_scaler=None,
use_apex_amp=False):
"""Calculate R1 gradient penalty for WGAN-GP.
R1 regularizer comes from:
"Which Training Methods for GANs do actually Converge?" ICML'2018
Diffrent from original gradient penalty, this regularizer only penalized
gradient w.r.t. real data.
Args:
discriminator (nn.Module): Network for the discriminator.
real_data (Tensor): Real input data.
mask (Tensor): Masks for inpainting. Default: None.
norm_mode (str): This argument decides along which dimension the norm
of the gradients will be calculated. Currently, we support ["pixel"
, "HWC"]. Defaults to "pixel".
Returns:
Tensor: A tensor for gradient penalty.
"""
batch_size = real_data.shape[0]
real_data = real_data.clone().requires_grad_()
disc_pred = discriminator(real_data)
if loss_scaler:
disc_pred = loss_scaler.scale(disc_pred)
elif use_apex_amp:
_loss_scaler = _amp_state.loss_scalers[0]
disc_pred = _loss_scaler.loss_scale() * disc_pred.float()
gradients = autograd.grad(
outputs=disc_pred,
inputs=real_data,
grad_outputs=torch.ones_like(disc_pred),
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
if loss_scaler:
# unscale the gradient
inv_scale = 1. / loss_scaler.get_scale()
gradients = gradients * inv_scale
elif use_apex_amp:
inv_scale = 1. / _loss_scaler.loss_scale()
gradients = gradients * inv_scale
if mask is not None:
gradients = gradients * mask
if norm_mode == 'pixel':
gradients_penalty = ((gradients.norm(2, dim=1))**2).mean()
elif norm_mode == 'HWC':
gradients_penalty = gradients.pow(2).reshape(batch_size,
-1).sum(1).mean()
else:
raise NotImplementedError(
'Currently, we only support ["pixel", "HWC"] '
f'norm mode but got {norm_mode}.')
if mask is not None:
gradients_penalty /= torch.mean(mask)
return gradients_penalty | d142375d89f39ae2510575d0615a878bd67e9574 | 10,694 |
import json
import re
def visualize(args):
"""Return the visualized output"""
ret = ""
cmd_list = json.load(args.results)['cmd_list']
cmd_list = util.filter_cmd_list(cmd_list, args.labels_to_include, args.labels_to_exclude)
(cmd_list, label_map) = util.translate_dict(cmd_list, args.label_map)
for cmd in cmd_list:
values = []
if 'jobs' in cmd:
for job in cmd['jobs']:
if 'results' in job:
for res in job['results']:
match_list = re.findall(args.parse_regex, res['stdout'])
if res['success'] and len(match_list) > 0:
for match in match_list:
values.append(args.py_type(match))
else:
values.append("N/A")
succeed_values = util.extract_succeed_results(cmd, args.parse_regex, args.py_type)
mean = util.mean(succeed_values)
std = util.standard_deviation(succeed_values)
if args.csv:
sep = args.csv_separator
ret += "%s%s %.4f%s %.4f" % (label_map[cmd['label']], sep, mean, sep, std)
else:
ret += "%s: %s" % (label_map[cmd['label']], values)
if len(succeed_values) > 0:
ret += " %.4f" % mean
ret += " (%.4f)" % std
ret += "\n"
return ret | 8a9e655adfc9713785f96ab487d579a19d9d09b2 | 10,695 |
def send_request(apikey, key_root, data, endpoint):
"""Send a request to the akismet server and return the response."""
url = 'http://%s%s/%s/%s' % (
key_root and apikey + '.' or '',
AKISMET_URL_BASE,
AKISMET_VERSION,
endpoint
)
    try:
        response = open_url(url, data=url_encode(data))
    except Exception:
        # Swallow network errors; callers receive None.
        return
try:
return response.data.strip()
finally:
response.close() | aea5ac8eb0b8b91002e680376c6f2647a631e58c | 10,696 |
def request_set_bblk_trace_options(*args):
"""
request_set_bblk_trace_options(options)
Post a 'set_bblk_trace_options()' request.
@param options (C++: int)
"""
return _ida_dbg.request_set_bblk_trace_options(*args) | 728d7e2a7a0ef0085d4bb72763b2a019d89896ec | 10,697 |
def range_str(values: iter) -> str:
"""
Given a list of integers, returns a terse string expressing the unique values.
Example:
indices = [0, 1, 2, 3, 4, 7, 8, 11, 15, 20]
range_str(indices)
>> '0-4, 7-8, 11, 15 & 20'
:param values: An iterable of ints
:return: A string of unique value ranges
"""
trial_str = ''
    values = sorted(set(values))  # sets are unordered; sort so range detection works
for i in range(len(values)):
if i == 0:
trial_str += str(values[i])
elif values[i] - (values[i - 1]) == 1:
if i == len(values) - 1 or values[i + 1] - values[i] > 1:
trial_str += f'-{values[i]}'
else:
trial_str += f', {values[i]}'
# Replace final comma with an ampersand
k = trial_str.rfind(',')
if k > -1:
trial_str = f'{trial_str[:k]} &{trial_str[k + 1:]}'
return trial_str | 85dedc97342b07dcb2a8dda753768309aa31ed43 | 10,698 |
import json
def analyze(request):
"""
利用soar分析SQL
:param request:
:return:
"""
text = request.POST.get('text')
instance_name = request.POST.get('instance_name')
db_name = request.POST.get('db_name')
if not text:
result = {"total": 0, "rows": []}
else:
soar = Soar()
if instance_name != '' and db_name != '':
soar_test_dsn = SysConfig().get('soar_test_dsn')
            # Get the instance connection info
instance_info = Instance.objects.get(instance_name=instance_name)
online_dsn = "{user}:{pwd}@{host}:{port}/{db}".format(user=instance_info.user,
pwd=instance_info.raw_password,
host=instance_info.host,
port=instance_info.port,
db=db_name)
else:
online_dsn = ''
soar_test_dsn = ''
args = {"report-type": "markdown",
"query": '',
"online-dsn": online_dsn,
"test-dsn": soar_test_dsn,
"allow-online-as-test": "false"}
rows = generate_sql(text)
for row in rows:
args['query'] = row['sql'].replace('"', '\\"').replace('`', '').replace('\n', ' ')
cmd_args = soar.generate_args2cmd(args=args, shell=True)
stdout, stderr = soar.execute_cmd(cmd_args, shell=True).communicate()
row['report'] = stdout if stdout else stderr
result = {"total": len(rows), "rows": rows}
return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
content_type='application/json') | a033774727242783357a35a6608d7154f77bc016 | 10,699 |
import math
def sin(c):
"""
sin(a+x)= sin(a) cos(x) + cos(a) sin(x)
"""
if not isinstance(c,pol): return math.sin(c)
a0,p=c.separate();
lst=[math.sin(a0),math.cos(a0)]
for n in range(2,c.order+1):
lst.append( -lst[-2]/n/(n-1))
return phorner(lst,p) | a6ec312df4362c130343133dae9a09b377f56cf5 | 10,700 |
def _calc_metadata() -> str:
"""
Build metadata MAY be denoted by appending a plus sign
and a series of dot separated identifiers
immediately following the patch or pre-release version.
Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
"""
if not is_appveyor:
return "local-build"
is_pr = PR_NUM in env
assert (PR_NUM in env) == (PR_BRANCH in env)
assert VER in env
if is_pr:
return "{VER}.pr{PR_NUM}-{PR_BRANCH}".format(**env)
else:
if env[BRANCH] != "master":
# Shouldn't happen, since side branches are not built.
return "{VER}.{BRANCH}".format(**env)
else:
return "{VER}".format(**env) | ccbd4912622808b5845d8e30546d6eb27e299342 | 10,701 |
import functools
def authorization_required(func):
"""Returns 401 response if user is not logged-in when requesting URL with user ndb.Key in it
or Returns 403 response if logged-in user's ndb.Key is different from ndb.Key given in requested URL.
"""
@functools.wraps(func)
def decorated_function(*pa, **ka): # pylint: disable=missing-docstring
if auth.is_authorized(ndb.Key(urlsafe=ka['key'])):
return func(*pa, **ka)
if not auth.is_logged_in():
return abort(401)
return abort(403)
return decorated_function | 12c0d645b0b26bf419e413866afaf1b4e7a19869 | 10,702 |
import torch
def pad_col(input, val=0, where='end'):
"""Addes a column of `val` at the start of end of `input`."""
if len(input.shape) != 2:
raise ValueError(f"Only works for `phi` tensor that is 2-D.")
pad = torch.zeros_like(input[:, :1])
if val != 0:
pad = pad + val
if where == 'end':
return torch.cat([input, pad], dim=1)
elif where == 'start':
return torch.cat([pad, input], dim=1)
raise ValueError(f"Need `where` to be 'start' or 'end', got {where}") | 77caa028bb76da922ba12492f077811d2344c2a9 | 10,703 |
from typing import List
import itertools
def seats_found_ignoring_floor(data: List[List[str]], row: int, col: int) -> int:
"""
Search each cardinal direction util we hit a wall or a seat.
If a seat is hit, determine if it's occupied.
"""
total_seats_occupied = 0
cardinal_direction_operations = itertools.product([-1, 0, 1], repeat=2)
for row_modifier, col_modifier in cardinal_direction_operations:
if row_modifier or col_modifier:
total_seats_occupied += next_seat_on_path_occupied(
data, row, col, row_modifier, col_modifier
)
return total_seats_occupied | e5442d757df6304da42f817c975969723ad0abca | 10,704 |
def product_design_space() -> ProductDesignSpace:
"""Build a ProductDesignSpace for testing."""
alpha = RealDescriptor('alpha', lower_bound=0, upper_bound=100, units="")
beta = RealDescriptor('beta', lower_bound=0, upper_bound=100, units="")
gamma = CategoricalDescriptor('gamma', categories=['a', 'b', 'c'])
dimensions = [
ContinuousDimension(alpha, lower_bound=0, upper_bound=10),
ContinuousDimension(beta, lower_bound=0, upper_bound=10),
EnumeratedDimension(gamma, values=['a', 'c'])
]
return ProductDesignSpace(name='my design space', description='does some things', dimensions=dimensions) | 93468cc7aaeb6a6bf7453d2f3e974bc28dece31f | 10,705 |
import numpy as np
def compute_percents_of_labels(label):
"""
Compute the ratio/percentage size of the labels in an labeled image
:param label: the labeled 2D image
:type label: numpy.ndarray
:return: An array of relative size of the labels in the image. Indices of the sizes in the array \
is corresponding to the labels in the labeled image. E.g. output [0.2, 0.5, 0.3] means label 0's size \
is 0.2 of the labeled image, label 1' size is 0.5 of the labeled image, and label 2's size is 0.3 of \
the labeled image.
:rtype: numpy.ndarray
"""
# Get the bins of the histogram. Since the last bin of the histogram is [label, label+1]
# We add 1 to the number of different labels in the labeled image when generating bins
num_labels = np.arange(0, len(np.unique(label)) + 1)
# Histogramize the label image and get the frequency array percent_of_dominance
(percent_of_dominance, _) = np.histogram(label, bins=num_labels)
# Convert the dtype of frequency array to float
percent_of_dominance = percent_of_dominance.astype("float")
# Normalized by the sum of frequencies (number of pixels in the labeled image)
percent_of_dominance /= percent_of_dominance.sum()
return percent_of_dominance | 6dfe34b7da38fa17a5aa4e42acc5c812dd126f77 | 10,706 |
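# Hedged usage sketch for `compute_percents_of_labels`: label 0 covers three
# pixels of a 2 x 2 image and label 1 covers one.
label = np.array([[0, 0], [0, 1]])
np.testing.assert_allclose(compute_percents_of_labels(label), [0.75, 0.25])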
def removepara(H,M,Hmin = '1/2',Hmax = 'max',output=-1,kwlc={}):
""" Retrieve lineal contribution to cycle and remove it from cycle.
**H** y **M** corresponds to entire cycle (two branches). I.e. **H**
starts and ends at the same value (or an aproximate value).
El ciclo M vs H se separa en sus dos ramas. H1,M1 y H2,M2, defined by::
H1,M1: curva con dH/dt < 0. El campo decrece con el tiempo.
H2,M2: curva con dH/dt > 0. El campo aumenta con el tiempo.
Con la variable global FIGS = True shows intermediate states of
proceso de determinarion y linear contribution removing.
Figure Shows **Hmin** and **Hmax** positions in the cycle.
output: kind of output, (0 or -1) out.params or (1) out. (v 0.210304)
Note: output is set to -1 as default to achive backward
compatibility. But it should be changed in future to 1.
kwlc = dictionary with kwargs to be passed to lienar contribution.
Returns:
if output = -1: H1,M1,H2,M2,[pendiente,salto,desp]
if output = 1:
returns plain objtect with previous attributes and others.
"""
if PRINT:
print('**********************************************************')
print('removepara ')
print('**********************************************************')
if Hmax == 'max':
Hmax = max(abs(H))
if Hmin == '1/2':
Hmin = 0.5*max(abs(H))
H1,M1,H2,M2 = splitcycle(H,M)
o1 = linealcontribution(H1,M1,[Hmax,Hmin],label='dH/dt < 0',output=output,**kwlc)
o2 = linealcontribution(H2,M2,[Hmax,Hmin],label='dH/dt > 0',output=output,**kwlc)
if output == 1:
p1 = o1.params
p2 = o2.params
elif output == -1:
p1 = o1
p2 = o2
Ms = (p1['Ms'].value + p2['Ms'].value)*0.5
if p1['Ms'].stderr == None or p2['Ms'].stderr == None:
eMs = None
else:
eMs = (p1['Ms'].stderr + p2['Ms'].stderr)*0.5
    # Fitting done
if PRINT:
print('slope 1:',p1['Xi'])
print('slope 2:',p2['Xi'])
print('Ms 1 :',p1['Ms'])
print('Ms 2 :',p2['Ms'])
print('Ms :%s +/- %s'%(Ms,eMs))
print('offset 1 :',p1['offset'])
print('offset 2 :',p2['offset'])
print('a 1 :',p1['a'])
print('a 2 :',p2['a'])
print('b 1 :',p1['b'])
print('b 2 :',p2['b'])
    # Build an average slope from the one obtained for each branch, then
    # correct both branches by removing this slope.
pend =(p1['Xi']+p2['Xi'])/2.
salto=(p1['Ms']+p2['Ms'])/2.
desp =(p1['offset']+p2['offset'])/2.
M1 = (M1-H1*pend)
M2 = (M2-H2*pend)
if FIGS:
__newfig__()
pyp.plot(H1,M1,'b.-',label = 'dH/dt < 0')
pyp.plot(H2,M2,'r.-',label = 'dH/dt > 0')
pyp.axhline(salto,color = 'k', alpha =0.5)
pyp.axhline(-salto,color= 'k', alpha =0.5)
pyp.legend(loc=0)
if output == 1:
out = ReturnClass()
out.H1 = H1
out.H2 = H2
out.M1 = M1
out.M2 = M2
out.pend = pend
out.desp = desp
out.salto = salto
out.o1 = o1
out.o2 = o2
return out
else:
return H1,M1,H2,M2,[pend,salto,desp] | 1d70c60f60b3ab7b976a0ec12a3541e5a7e53426 | 10,707 |
def flush():
"""
Remove all mine contents of minion.
:rtype: bool
:return: True on success
CLI Example:
.. code-block:: bash
salt '*' mine.flush
"""
if __opts__["file_client"] == "local":
return __salt__["data.update"]("mine_cache", {})
load = {
"cmd": "_mine_flush",
"id": __opts__["id"],
}
return _mine_send(load, __opts__) | fe7d120362393fcb4380473cdaf76e153646644a | 10,708 |
import shapely.geometry
def polygon_to_shapely_polygon_wkt_compat(polygon):
"""
Convert a Polygon to its Shapely Polygon representation but with WKT
compatible coordinates.
"""
shapely_points = []
for location in polygon.locations():
shapely_points.append(location_to_shapely_point_wkt_compat(location))
return shapely.geometry.Polygon(shapely.geometry.LineString(shapely_points)) | 54c889d2071cc8408c2bb4b739a30c3458c80f4c | 10,709 |
import six
def ccd_process(ccd, oscan=None, trim=None, error=False, masterbias=None,
bad_pixel_mask=None, gain=None, rdnoise=None,
oscan_median=True, oscan_model=None):
"""Perform basic processing on ccd data.
The following steps can be included:
* overscan correction
* trimming of the image
    * creation of a deviation frame
* gain correction
* add a mask to the data
* subtraction of master bias
The task returns a processed `ccdproc.CCDData` object.
Parameters
----------
ccd: `ccdproc.CCDData`
Frame to be reduced
oscan: None, str, or, `~ccdproc.ccddata.CCDData`
        For no overscan correction, set to None. Otherwise provide a region
of `ccd` from which the overscan is extracted, using the FITS
conventions for index order and index start, or a
slice from `ccd` that contains the overscan.
trim: None or str
        For no trim correction, set to None. Otherwise provide a region
of `ccd` from which the image should be trimmed, using the FITS
conventions for index order and index start.
error: boolean
If True, create an uncertainty array for ccd
masterbias: None, `~numpy.ndarray`, or `~ccdproc.CCDData`
        A master bias frame to be subtracted from ccd.
bad_pixel_mask: None or `~numpy.ndarray`
        A bad pixel mask for the data. The bad pixel mask should be given
        such that bad pixels have a value of 1 and good pixels a value of 0.
gain: None or `~astropy.Quantity`
        Gain value to multiply the image by to convert to electrons
rdnoise: None or `~astropy.Quantity`
Read noise for the observations. The read noise should be in
`~astropy.units.electron`
oscan_median : bool, optional
If true, takes the median of each line. Otherwise, uses the mean
oscan_model : `~astropy.modeling.Model`, optional
Model to fit to the data. If None, returns the values calculated
by the median or the mean.
Returns
-------
ccd: `ccdproc.CCDData`
        Reduced ccd
"""
# make a copy of the object
nccd = ccd.copy()
# apply the overscan correction
if isinstance(oscan, ccdproc.CCDData):
nccd = ccdproc.subtract_overscan(nccd, overscan=oscan,
median=oscan_median,
model=oscan_model)
elif isinstance(oscan, six.string_types):
nccd = ccdproc.subtract_overscan(nccd, fits_section=oscan,
median=oscan_median,
model=oscan_model)
elif oscan is None:
pass
else:
raise TypeError('oscan is not None, a string, or CCDData object')
# apply the trim correction
if isinstance(trim, six.string_types):
nccd = ccdproc.trim_image(nccd, fits_section=trim)
elif trim is None:
pass
else:
raise TypeError('trim is not None or a string')
# create the error frame
if error and gain is not None and rdnoise is not None:
nccd = ccdproc.create_deviation(nccd, gain=gain, rdnoise=rdnoise)
elif error and (gain is None or rdnoise is None):
raise ValueError(
'gain and rdnoise must be specified to create error frame')
# apply the bad pixel mask
if isinstance(bad_pixel_mask, np.ndarray):
nccd.mask = bad_pixel_mask
elif bad_pixel_mask is None:
pass
else:
raise TypeError('bad_pixel_mask is not None or numpy.ndarray')
# apply the gain correction
if isinstance(gain, u.quantity.Quantity):
nccd = ccdproc.gain_correct(nccd, gain)
elif gain is None:
pass
else:
raise TypeError('gain is not None or astropy.Quantity')
    # subtract the master bias
if isinstance(masterbias, ccdproc.CCDData):
nccd = nccd.subtract(masterbias)
elif isinstance(masterbias, np.ndarray):
nccd.data = nccd.data - masterbias
elif masterbias is None:
pass
else:
raise TypeError(
'masterbias is not None, numpy.ndarray, or a CCDData object')
return nccd | 610a53693ff84ba2e1a68662dd0a19e55228c129 | 10,710 |
def get_role_keyids(rolename):
"""
<Purpose>
Return a list of the keyids associated with 'rolename'.
Keyids are used as identifiers for keys (e.g., rsa key).
A list of keyids are associated with each rolename.
Signing a metadata file, such as 'root.json' (Root role),
involves signing or verifying the file with a list of
keys identified by keyid.
<Arguments>
rolename:
An object representing the role's name, conformant to 'ROLENAME_SCHEMA'
(e.g., 'root', 'snapshot', 'timestamp').
<Exceptions>
tuf.FormatError, if 'rolename' does not have the correct object format.
tuf.UnknownRoleError, if 'rolename' cannot be found in the role database.
tuf.InvalidNameError, if 'rolename' is incorrectly formatted.
<Side Effects>
None.
<Returns>
A list of keyids.
"""
# Raises tuf.FormatError, tuf.UnknownRoleError, or tuf.InvalidNameError.
_check_rolename(rolename)
roleinfo = _roledb_dict[rolename]
return roleinfo['keyids'] | 4888a09740560d760bfffe9eecd50bfa67ff0613 | 10,711 |
import numpy as np
def _DX(X):
    """Computes the finite differences of X along y and x.
    Arguments
    ---------
    X: (m, n, l) numpy array
        The data to differentiate.
Returns
-------
tuple
Tuple of length 2 (Dy(X), Dx(X)).
Note
----
DX[0] which is derivate along y has shape (m-1, n, l).
DX[1] which is derivate along x has shape (m, n-1, l).
"""
return (X[1:, :, :] - X[:-1, :, :], # D along y
X[:, 1:, :] - X[:, 0:-1, :]) # D along x | 4aff05c2c25089c9f93b762a18dad42b0142db09 | 10,712 |
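# Hedged usage sketch for `_DX`: shapes of the finite differences for a
# 4 x 5 x 2 array.
X = np.arange(40, dtype=float).reshape(4, 5, 2)
Dy, Dx = _DX(X)
assert Dy.shape == (3, 5, 2) and Dx.shape == (4, 4, 2)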
def load_spectra_from_dataframe(df):
"""
:param df:pandas dataframe
:return:
"""
total_flux = df.total_flux.values[0]
spectrum_file = df.spectrum_filename.values[0]
pink_stride = df.spectrum_stride.values[0]
spec = load_spectra_file(spectrum_file, total_flux=total_flux,
pinkstride=pink_stride, as_spectrum=True)
return spec | 31d1cbbee8d999dac5ee0d7f8d4c71f7f58afc3b | 10,713 |
def included_element(include_predicates, exclude_predicates, element):
"""Return whether an index element should be included."""
return (not any(evaluate_predicate(element, ep)
for ep in exclude_predicates) and
(include_predicates == [] or
any(evaluate_predicate(element, ip)
for ip in include_predicates))) | 00e0d66db26e8bca7e3cb8505596247065422cb6 | 10,714 |
def _insertstatushints(x):
"""Insert hint nodes where status should be calculated (first path)
This works in bottom-up way, summing up status names and inserting hint
nodes at 'and' and 'or' as needed. Thus redundant hint nodes may be left.
Returns (status-names, new-tree) at the given subtree, where status-names
is a sum of status names referenced in the given subtree.
"""
if x is None:
return (), x
op = x[0]
if op in {'string', 'symbol', 'kindpat'}:
return (), x
if op == 'not':
h, t = _insertstatushints(x[1])
return h, (op, t)
if op == 'and':
ha, ta = _insertstatushints(x[1])
hb, tb = _insertstatushints(x[2])
hr = ha + hb
if ha and hb:
return hr, ('withstatus', (op, ta, tb), ('string', ' '.join(hr)))
return hr, (op, ta, tb)
if op == 'or':
hs, ts = zip(*(_insertstatushints(y) for y in x[1:]))
hr = sum(hs, ())
if sum(bool(h) for h in hs) > 1:
return hr, ('withstatus', (op,) + ts, ('string', ' '.join(hr)))
return hr, (op,) + ts
if op == 'list':
hs, ts = zip(*(_insertstatushints(y) for y in x[1:]))
return sum(hs, ()), (op,) + ts
if op == 'func':
f = getsymbol(x[1])
# don't propagate 'ha' crossing a function boundary
ha, ta = _insertstatushints(x[2])
if getattr(symbols.get(f), '_callstatus', False):
return (f,), ('withstatus', (op, x[1], ta), ('string', f))
return (), (op, x[1], ta)
raise error.ProgrammingError('invalid operator %r' % op) | 956fe03a7f5747f93034501e63cc31ff2956c2d6 | 10,715 |
import numpy as np
def make_sine(freq: float, duration: float, sr=SAMPLE_RATE):
"""Return sine wave based on freq in Hz and duration in seconds"""
N = int(duration * sr) # Number of samples
return np.sin(np.pi*2.*freq*np.arange(N)/sr) | 622b03395da5d9f8a22ac0ac30282e23d6596055 | 10,716 |
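# Hedged usage sketch for `make_sine`: one second of a 440 Hz tone at an
# explicit sample rate (the module-level SAMPLE_RATE default is elided here).
tone = make_sine(440.0, 1.0, sr=44100)
assert tone.shape == (44100,)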
def _widget_abbrev(o):
"""Make widgets from abbreviations: single values, lists or tuples."""
float_or_int = (float, int)
if isinstance(o, (list, tuple)):
if o and all(isinstance(x, string_types) for x in o):
return DropdownWidget(values=[unicode_type(k) for k in o])
elif _matches(o, (float_or_int, float_or_int)):
min, max, value = _get_min_max_value(o[0], o[1])
if all(isinstance(_, int) for _ in o):
cls = IntSliderWidget
else:
cls = FloatSliderWidget
return cls(value=value, min=min, max=max)
elif _matches(o, (float_or_int, float_or_int, float_or_int)):
step = o[2]
if step <= 0:
raise ValueError("step must be >= 0, not %r" % step)
min, max, value = _get_min_max_value(o[0], o[1], step=step)
if all(isinstance(_, int) for _ in o):
cls = IntSliderWidget
else:
cls = FloatSliderWidget
return cls(value=value, min=min, max=max, step=step)
else:
return _widget_abbrev_single_value(o) | f5a57f2d74811ff21ea56631fd9fb22fea4ae91f | 10,717 |
def get_conditions():
"""
List of conditions
"""
return [
'blinded',
'charmed',
'deafened',
'fatigued',
'frightened',
'grappled',
'incapacitated',
'invisible',
'paralyzed',
'petrified',
'poisoned',
'prone',
'restrained',
'stunned',
'unconscious',
'exhaustion'
] | 816ccb50581cafa20bdefed2a075a3370704cef4 | 10,718 |
import numpy as np
def negative_predictive_value(y_true: np.ndarray, y_score: np.ndarray) -> float:
    """
    Calculate the negative predictive value (duplicated in :func:`precision_score`).
Args:
y_true (array-like): An N x 1 array of ground truth values.
y_score (array-like): An N x 1 array of predicted values.
Returns:
npv (float): The negative predictive value.
"""
tn = true_negative(y_true, y_score)
fn = false_negative(y_true, y_score)
npv = tn / (tn + fn)
return npv | 28f1d4fce76b6201c6dbeb99ad19337ca84b74c5 | 10,719 |
def flat_list(*alist):
"""
Flat a tuple, list, single value or list of list to flat list
e.g.
>>> flat_list(1,2,3)
[1, 2, 3]
>>> flat_list(1)
[1]
>>> flat_list([1,2,3])
[1, 2, 3]
>>> flat_list([None])
[]
"""
a = []
for x in alist:
if x is None:
continue
if isinstance(x, (tuple, list)):
a.extend([i for i in x if i is not None])
else:
a.append(x)
return a | 5a68495e507e9a08a9f6520b83a912cf579c6688 | 10,720 |
from typing import List
import pandas as pd
from sklearn.linear_model import LogisticRegression
def do_regression(X_cols: List[str], y_col: str, df: pd.DataFrame, solver='liblinear', penalty='l1',
                  C=0.2) -> LogisticRegression:
"""
Performs regression.
:param X_cols: Independent variables.
:param y_col: Dependent variable.
:param df: Data frame.
:param solver: Solver. Default is liblinear.
:param penalty: Penalty. Default is ``l1``.
    :param C: Inverse of regularization strength. Default is ``0.2``.
:return: Logistic regression model.
"""
X = df[X_cols]
y = df[y_col]
model = LogisticRegression(penalty=penalty, solver=solver, C=C)
model.fit(X, y)
return model | 8a65d49e64e96b3fc5271545afe1761382ec1396 | 10,721 |
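# Hedged usage sketch for `do_regression`: fit on a toy frame with two
# features and a binary label, then predict on the same rows.
df = pd.DataFrame({'x1': [0, 1, 2, 3], 'x2': [1, 0, 1, 0], 'y': [0, 0, 1, 1]})
model = do_regression(['x1', 'x2'], 'y', df)
predictions = model.predict(df[['x1', 'x2']])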
import numpy as np
import xarray as xr
from scipy.signal import convolve
from scipy.signal.windows import gaussian
def gaussian_smooth(var, sigma):
    """Apply a Gaussian filter along the time dimension.
    Applies a Gaussian filter to the data along the time dimension. If the
    time dimension is missing, raises an exception. Because 'same'
    convolution is used, the returned DataArray keeps the input length;
    values within sigma of each end are subject to edge effects.
    The width of the window is 2*sigma + 1.
    """
if type(var) is not xr.DataArray:
raise TypeError("First argument must be an Xarray DataArray.")
if 'time' not in var.dims:
raise IndexError("Time coordinate not found.")
# The convolution window must have the same number of dimensions as the
# variable. The length of every dimension is one, except time, which is
# 2xsigma + 1.
var_dimensions = np.ones( len(var.coords), dtype=np.int )
timepos = var.dims.index('time')
var_dimensions[timepos] = 2*sigma + 1
# Use a normalized gaussian so the average of the variable does not change.
gausswin = gaussian(2*sigma + 1, sigma)
gausswin = gausswin/np.sum(gausswin)
    # The window series used in the convolve operation is the Gaussian for the
    # time dimension and a singleton zero for the other dimensions. This way
    # the multidimensional convolve is:
#
# g(m,n,...) = \sum_k \sum_l ... f[k,l,...]h[k-m]\delta_l0...
#
timeslice_specification = [0 for x in range(len(var.coords))]
timeslice_specification[timepos] = slice(None)
win = np.zeros(var_dimensions)
    win[tuple(timeslice_specification)] = gausswin  # NumPy requires a tuple index here
# The third parameter 'same' specifies a return array of the same shape as
# var.
out = convolve(var, win, 'same')
outda = xr.DataArray(out,
name=var.name,
coords=var.coords,
dims=var.dims)
outda.attrs = var.attrs
return outda | 809ec7b135ab7d915dd62ad10baea71bfd146e34 | 10,722 |
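# Hedged usage sketch for `gaussian_smooth`: smooth a random 1-D series with
# sigma = 2; 'same' convolution preserves the length.
da = xr.DataArray(np.random.rand(50), dims=['time'],
                  coords={'time': np.arange(50)})
smoothed = gaussian_smooth(da, sigma=2)
assert smoothed.shape == da.shape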
import logging
def make_ood_dataset(ood_dataset_cls: _BaseDatasetClass) -> _BaseDatasetClass:
"""Generate a BaseDataset with in/out distribution labels."""
class _OodBaseDataset(ood_dataset_cls):
"""Combine two datasets to form one with in/out of distribution labels."""
def __init__(
self,
in_distribution_dataset: BaseDataset,
shuffle_datasets: bool = False,
**kwargs):
super().__init__(**kwargs)
# This should be the builder for whatever split will be considered
# in-distribution (usually the test split).
self._in_distribution_dataset = in_distribution_dataset
self._shuffle_datasets = shuffle_datasets
def load(self,
*,
preprocess_fn=None,
batch_size: int = -1) -> tf.data.Dataset:
# Set up the in-distribution dataset using the provided dataset builder.
if preprocess_fn:
dataset_preprocess_fn = preprocess_fn
else:
dataset_preprocess_fn = (
self._in_distribution_dataset._create_process_example_fn()) # pylint: disable=protected-access
dataset_preprocess_fn = ops.compose(
dataset_preprocess_fn,
_create_ood_label_fn(True))
dataset = self._in_distribution_dataset.load(
preprocess_fn=dataset_preprocess_fn,
batch_size=batch_size)
# Set up the OOD dataset using this class.
if preprocess_fn:
ood_dataset_preprocess_fn = preprocess_fn
else:
ood_dataset_preprocess_fn = super()._create_process_example_fn()
ood_dataset_preprocess_fn = ops.compose(
ood_dataset_preprocess_fn,
_create_ood_label_fn(False))
ood_dataset = super().load(
preprocess_fn=ood_dataset_preprocess_fn,
batch_size=batch_size)
# We keep the fingerprint id in both dataset and ood_dataset
# Combine the two datasets.
try:
combined_dataset = dataset.concatenate(ood_dataset)
except TypeError:
logging.info(
'Two datasets have different types, concat feature and label only')
def clean_keys(example):
# only keep features and labels, remove the rest
return {
'features': example['features'],
'labels': example['labels'],
'is_in_distribution': example['is_in_distribution']
}
combined_dataset = dataset.map(clean_keys).concatenate(
ood_dataset.map(clean_keys))
if self._shuffle_datasets:
combined_dataset = combined_dataset.shuffle(self._shuffle_buffer_size)
return combined_dataset
@property
def num_examples(self):
return (
self._in_distribution_dataset.num_examples +
super().num_examples)
return _OodBaseDataset | c1c26206e352932d3a5397f047365c8c5c8b7fa7 | 10,723 |
def _title_case(value):
"""
Return the title of the string but the
first letter is affected.
"""
return value[0].upper() + value[1:] | 037bce973580f69d87c2e3b4e016b626a2b76abb | 10,724 |
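# Hedged usage sketch for `_title_case` (unlike str.title, later words are
# left untouched):
assert _title_case("hello world") == "Hello world"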