content | sha1 | id
---|---|---
string (35 to 762k characters) | string (40 characters) | int64 (0 to 3.66M)
def get_curve_points(
road: Road,
center: np.ndarray,
road_end: np.ndarray,
placement_offset: float,
is_end: bool,
) -> list[np.ndarray]:
"""
:param road: road segment
:param center: road intersection point
:param road_end: end point of the road segment
:param placement_offset: offset based on placement tag value
:param is_end: whether the point represents road end
"""
width: float = road.width / 2.0 * road.scale
direction: np.ndarray = (center - road_end) / np.linalg.norm(
center - road_end
)
if is_end:
direction = -direction
left: np.ndarray = turn_by_angle(direction, np.pi / 2.0) * (
width + placement_offset
)
right: np.ndarray = turn_by_angle(direction, -np.pi / 2.0) * (
width - placement_offset
)
return [road_end + left, center + left, center + right, road_end + right] | 9bc22c1894332a70e904d4c543606c3f38606064 | 10,600 |
def default_inputs_folder_at_judge(receiver):
"""
When a receiver is added to a task and `receiver.send_to_judge` is checked,
this function will be used to automatically set the name of the folder with inputs at the judge server.
When this function is called, the SubmitReceiver object has been created but not yet saved in the database.
"""
return '{}-{}'.format(submit_settings.JUDGE_INTERFACE_IDENTITY, receiver.id) | 0f45a374a32feb19ffe17d394831123ca8af68c8 | 10,601 |
def remove_extension(string):
""" Removes the extention from a string, as well as the directories.
This function may fail if more than one . is in the file, such as ".tar.gz"
Args:
string: (string): either a path or a filename that for a specific file, with extension.
(e.g. /usr/dir/sample.mitograph or sample.mitograph)
Returns:
filename_without_extension (str): just the filename without the extension (e.g. "sample")
"""
# Remove all enclosing directories, only get the name of file.
cur_filename_with_extension = remove_enclosing_dirs(string)
# Remove the extension by splitting the string at each "." and only taking first part.
filename_without_extension = cur_filename_with_extension.split(".")[0]
return filename_without_extension | 8bdd3818696745c5955dfb5bd7725d87e1284103 | 10,602 |
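A quick self-contained sketch of the behaviour described in the docstring, assuming remove_enclosing_dirs acts like os.path.basename (that helper is not shown in this snippet):
import os

def remove_extension_sketch(path):
    # os.path.basename stands in for remove_enclosing_dirs (assumption).
    # Note the caveat from the docstring: everything after the first "." is discarded.
    return os.path.basename(path).split(".")[0]

assert remove_extension_sketch("/usr/dir/sample.mitograph") == "sample"
assert remove_extension_sketch("archive.tar.gz") == "archive"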
def _theme_static(path):
"""
Serve arbitrary files.
"""
return static_app.static(path, 'theme') | c93815b041632c313961afbe7ef254117c4259de | 10,603 |
def create_link(link: schemas.Link, db: Session = Depends(get_db)):
"""Create link
"""
# Check if the target already exists
db_link = crud.get_link_by_target(db=db, target=link.target)
if db_link:
raise HTTPException(status_code=400, detail="link already registered")
response = crud.create_link(db=db, link=link)
payload = {'link': response.link}
return JSONResponse(content=payload) | b220403b6d054df0f0e6f0538573038b0f7895b3 | 10,604 |
def encode_rsa_public_key(key):
"""
Encode an RSA public key into PKCS#1 DER-encoded format.
:param PublicKey key: RSA public key
:rtype: bytes
"""
return RSAPublicKey({
'modulus': int.from_bytes(key[Attribute.MODULUS], byteorder='big'),
'public_exponent': int.from_bytes(key[Attribute.PUBLIC_EXPONENT],
byteorder='big'),
}).dump() | 38b1c3b4ee361415fa8587df7dbfdd94d00fdbe1 | 10,605 |
def is_block_valid(new_block, old_block):
"""
Simple check that the block is valid.
"""
if old_block["Index"] + 1 != new_block["Index"]:
return False
if old_block["Hash"] != new_block["PrevHash"]:
return False
if caculate_hash(new_block) != new_block["Hash"]:
return False
return True | 8447ca7b7bbb75748601d4a79d97047ad7ef07ab | 10,606 |
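A small usage sketch; caculate_hash is not defined in this snippet, so a SHA-256 stand-in is used here purely for illustration:
import hashlib

def caculate_hash(block):
    # Illustrative stand-in for the chain's real hash function (assumption).
    payload = "{}{}{}".format(block["Index"], block["PrevHash"], block.get("Data", ""))
    return hashlib.sha256(payload.encode()).hexdigest()

old_block = {"Index": 0, "PrevHash": "", "Data": "genesis"}
old_block["Hash"] = caculate_hash(old_block)
new_block = {"Index": 1, "PrevHash": old_block["Hash"], "Data": "tx"}
new_block["Hash"] = caculate_hash(new_block)
assert is_block_valid(new_block, old_block)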
import urllib.parse
def add_get_parameter(url, key, value):
"""
Utility method to add an HTTP request parameter to a GET request
"""
if '?' in url:
return url + "&%s" % urllib.urlencode([(key, value)])
else:
return url + "?%s" % urllib.urlencode([(key, value)]) | 640de1f111ff9080386f855e220e8eaaad113a0a | 10,607 |
def get_simple_match(text):
"""Returns a word instance in the dictionary, selected by a simplified String match"""
# Try to find a matching word
try:
result = word.get(word.normalized_text == text)
return result
except peewee.DoesNotExist:
return None | e8365b6129e452eb17696daf8638a573e8d0cb4b | 10,608 |
def ipv_plot_df(points_df, sample_frac=1, marker='circle_2d', size=0.2, **kwargs):
"""Plot vertices in a dataframe using ipyvolume."""
if sample_frac < 1:
xyz = random_sample(points_df, len(points_df), sample_frac)
else:
xyz = dict(x=points_df['x'].values, y=points_df['y'].values, z=points_df['z'].values)
fig = ipv.scatter(**xyz, marker=marker, size=size, **kwargs)
return fig | a957629bcf7b9acbff314f243a3cae9803bda69d | 10,609 |
import subprocess
def exec_command_stdout(*command_args, **kwargs):
"""
Capture and return the standard output of the command specified by the
passed positional arguments, optionally configured by the passed keyword
arguments.
Unlike the legacy `exec_command()` and `exec_command_all()` functions, this
modern function is explicitly designed for cross-platform portability. The
return value may be safely used for any purpose, including string
manipulation and parsing.
.. NOTE::
If this command's standard output contains _only_ pathnames, this
function does _not_ return the correct filesystem-encoded string expected
by PyInstaller. If this is the case, consider calling the
filesystem-specific `exec_command()` function instead.
Parameters
----------
cmdargs : list
Variadic list whose:
1. Mandatory first element is the absolute path, relative path,
or basename in the current `${PATH}` of the command to run.
1. Optional remaining elements are arguments to pass to this command.
encoding : str, optional
Optional name of the encoding with which to decode this command's
standard output (e.g., `utf8`), passed as a keyword argument. If
unpassed, this output will be decoded in a portable manner specific to
the current platform, shell environment, and system settings with
Python's built-in `universal_newlines` functionality.
All remaining keyword arguments are passed as is to the
`subprocess.check_output()` function.
Returns
----------
unicode or str
Unicode string of this command's standard output decoded according to
the "encoding" keyword argument. This string's type depends on the
current Python version as follows:
* Under Python 2.7, this is a decoded `unicode` string.
* Under Python 3.x, this is a decoded `str` string.
"""
# Value of the passed "encoding" parameter, defaulting to None.
encoding = kwargs.pop('encoding', None)
# If no encoding was specified, the current locale is defaulted to. Else, an
# encoding was specified. To ensure this encoding is respected, the
# "universal_newlines" option is disabled if also passed. Nice, eh?
kwargs['universal_newlines'] = encoding is None
# Standard output captured from this command as a decoded Unicode string if
# "universal_newlines" is enabled or an encoded byte array otherwise.
stdout = subprocess.check_output(command_args, **kwargs)
# Return a Unicode string, decoded from this encoded byte array if needed.
return stdout if encoding is None else stdout.decode(encoding) | 32552fed9fd250548c0826a8b2679fa46bd8bf14 | 10,610 |
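A brief usage sketch; the commands shown are illustrative and assumed to be on the PATH:
# Decode using the platform default via universal_newlines:
version_text = exec_command_stdout("python", "--version")
# Or force a specific encoding of the raw bytes:
status_text = exec_command_stdout("git", "status", encoding="utf8")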
def admin_login():
"""
This function is used to show the admin login page
:return: admin_login.html
"""
return render_template("admin_login.html") | 495841f7cb352a07d8214f99b99ca8be7179839f | 10,611 |
from os import path
def input_file_location(message):
"""
This function performs basic quality control of user input. It
calls for a filepath with a pre-specified message. The function
then checks if the given filepath leads to an actual existing
file. If no file exists at the given location, the function will
throw an error message and ask for a new file location.
:param message: String. Contains the message asking for a filepath
:return filepath: String. Contains a filepath leading to the file.
"""
filepath = input(message)
flag = path.isfile(filepath)
while not flag:
filepath = input("Error: file not found! \n"
"Please specify full relative filepath leading to the required file")
flag = path.isfile(filepath)
print("%s succesfully located"%(filepath))
return filepath | e0fd6c728c900c1eade36d9b03075ec3d79d22d4 | 10,612 |
def create_contact():
"""
Get a contact form submission
"""
data = request.get_json(force=True)
contact = ContactDAO.create(**data)
return jsonify(contact.to_dict()) | af2c5efbd06d3220faf3b16059ea9d612cece19e | 10,613 |
import os
import ntpath
def make_my_tuple_video(LOGGER, image, width, height, frames, codec, metric, target, subsampling, param, uuid=None):
""" make unique tuple for unique directory, primary key in DB, etc.
"""
(filepath, tempfilename) = os.path.split(image)
filename, extension = os.path.splitext(tempfilename)
my_tuple = '{filename}_{extension}_{width}x{height}x{frames}_{codec}_{metric}_{target}_{subsampling}_{param}_' \
.format(filename=filename, extension=extension[1:], image=ntpath.basename(image), width=width, height=height,
frames=frames, codec=codec, metric=metric, target=target, subsampling=subsampling, param=param)
if uuid is not None:
my_tuple = my_tuple + uuid
if len(my_tuple) > 255: # limits due to max dir name or file name length on UNIX
LOGGER.error("ERROR : Tuple too long : " + my_tuple)
assert len(my_tuple) < 256
return my_tuple | b37a38d6f76361fa85974d5ad56303d4f6ae308d | 10,614 |
from typing import DefaultDict
from typing import Tuple
from typing import List
import copy
def separate_sets(
hand: DefaultDict[int, int], huro_count: int, koutsu_first: bool = True
) -> Tuple[List[Tile], List[List[Tile]], Tile]:
"""Helper function for seperating player's remaining hands into sets.
It should either be 14, 11, 8, 5, or 2 tiles.
The arg koutsu_first would change the priority for koutsu and shuntsu,
for example in the usecase for checking 全帯么九, shuntsu should have
priority over koutsu.
"""
def check_koutsu(sets_to_find):
if remain_tiles[tile_index] >= 3: # check for Koutsu
remain_tiles[tile_index] -= 3
sets_to_find -= 1
koutsu.append(Tile.from_index(tile_index))
return sets_to_find
def check_shuntsu(sets_to_find):
if remain_tiles[tile_index + 2] > 0: # check for Shuntsu
chii_n = min(
remain_tiles[tile_index],
remain_tiles[tile_index + 1],
remain_tiles[tile_index + 2]
)
if chii_n > 0:
remain_tiles[tile_index] -= chii_n
remain_tiles[tile_index + 1] -= chii_n
remain_tiles[tile_index + 2] -= chii_n
sets_to_find -= chii_n
for _ in range(chii_n):
shuntsu.append([
Tile.from_index(tile_index),
Tile.from_index(tile_index + 1),
Tile.from_index(tile_index + 2)
])
return sets_to_find
for possible_jantou in hand.keys():
if hand[possible_jantou] >= 2: # try using it as jantou
remain_tiles = copy.deepcopy(hand)
remain_tiles[possible_jantou] -= 2
koutsu = []
shuntsu = []
sets_to_find = 4 - huro_count
for tile_index in sorted(remain_tiles.keys()):
if tile_index < Tile(Suit.MANZU.value, 1).index:
if remain_tiles[tile_index] == 3:
sets_to_find -= 1
koutsu.append(Tile.from_index(tile_index))
else: # numbered tiles
if koutsu_first:
sets_to_find = check_koutsu(sets_to_find)
sets_to_find = check_shuntsu(sets_to_find)
else:
sets_to_find = check_shuntsu(sets_to_find)
sets_to_find = check_koutsu(sets_to_find)
if sets_to_find == 0:
return koutsu, shuntsu, Tile.from_index(possible_jantou)
return [], [], None | 894a712a739e16a98e2150c4461a3d66c759bace | 10,615 |
def units(legal_codes):
"""
Return sorted list of the unique units for the given
dictionaries representing legal_codes
"""
return sorted(set(lc["unit"] for lc in legal_codes)) | 85803ecb3d1f51c058c959b7e060c3cb5263f6a3 | 10,616 |
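For example, duplicates collapse and the result comes back sorted:
legal_codes = [{"unit": "by"}, {"unit": "by-sa"}, {"unit": "by"}, {"unit": "zero"}]
assert units(legal_codes) == ["by", "by-sa", "zero"]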
def resize_terms(terms1, terms2, patterns_to_pgS, use_inv):
"""
Resize the terms to ensure that the probabilities are the same on both sides.
This is necessary to maintain the null hypothesis that D = 0 under no introgression.
Inputs:
terms1 --- a set of patterns to count and add to each other to determine introgression
terms2 --- a set of other patterns to count and add to each other to determine introgression
patterns_to_pgS --- a mapping of site patterns to their p(gt|st) values
use_inv --- boolean for determining if inverse site patterns will be used
Outputs:
terms1 --- a set of patterns to count and add to each other to determine introgression
terms2 --- a set of other patterns to count and add to each other to determine introgression
"""
terms1 = list(terms1)
terms2 = list(terms2)
# Create a mapping of pgtst to trees for each term
pgtst_to_trees1 = defaultdict(set)
pgtst_to_trees2 = defaultdict(set)
for tree in terms1:
# Round the probability to the 15th digit to prevent the randomness issues with small values
prob = float(format(patterns_to_pgS[tree], '.15f'))
pgtst_to_trees1[prob].add(tree)
for tree in terms2:
# Round the probability to the 15th digit to prevent the randomness issues with small values
prob = float(format(patterns_to_pgS[tree], '.15f'))
pgtst_to_trees2[prob].add(tree)
# Balance terms
terms1_prob_counts = defaultdict(int)
terms2_prob_counts = defaultdict(int)
# Round each probability and count the number of times it occurs
for tree in terms1:
prob = float(format(patterns_to_pgS[tree], '.15f'))
terms1_prob_counts[prob] += 1
for tree in terms2:
prob = float(format(patterns_to_pgS[tree], '.15f'))
terms2_prob_counts[prob] += 1
# Iterate over each probability
for prob in terms1_prob_counts:
# Get the number of times each probability occurs
count1, count2 = terms1_prob_counts[prob], terms2_prob_counts[prob]
removed = set([])
# The number of site patterns to remove is the difference in counts
num_remove = abs(count2 - count1)
if use_inv:
# If using inverses, each removal also drops the inverse pattern, so halve the count
num_remove = num_remove // 2
# If probabilities do not occur an equal number of times remove site patterns until they do
if count1 > count2:
for i in range(num_remove):
# Get a pattern to remove and remove it from the possible removals
r = sorted(list(pgtst_to_trees1[prob])).pop(0)
pgtst_to_trees1[prob].remove(r)
removed.add(r)
terms1_remove = True
if count1 < count2:
for i in range(num_remove):
# Get a pattern to remove and remove it from the possible removals
r = sorted(list(pgtst_to_trees2[prob])).pop(0)
pgtst_to_trees2[prob].remove(r)
removed.add(r)
terms1_remove = False
if use_inv:
# Remove site patterns and their inverses
rm = set([])
inv_rm = pattern_inverter(removed)
for pattern in inv_rm:
rm.add(''.join(pattern))
removed = removed.union(rm)
# Iterate over each pattern to be removed and remove it
for pattern in removed:
if terms1_remove:
terms1.remove(pattern)
else:
terms2.remove(pattern)
terms1, terms2 = tuple(terms1), tuple(terms2)
return terms1, terms2 | d422e3d5b32df55036afa3788cdb0bdd4aa95001 | 10,617 |
def get_network_list():
"""Get a list of networks.
---
tags:
- network
"""
return jsonify([
network.to_json(include_id=True)
for network in manager.cu_list_networks()
]) | 6a54b76091160fc28cd45502aea4c54d2862a588 | 10,618 |
def bfixpix(data, badmask, n=4, retdat=False):
"""Replace pixels flagged as nonzero in a bad-pixel mask with the
average of their nearest four good neighboring pixels.
:INPUTS:
data : numpy array (two-dimensional)
badmask : numpy array (same shape as data)
:OPTIONAL_INPUTS:
n : int
number of nearby, good pixels to average over
retdat : bool
If True, return an array instead of replacing-in-place and do
_not_ modify input array `data`. This is always True if a 1D
array is input!
:RETURNS:
another numpy array (if retdat is True)
:TO_DO:
Implement new approach of Popowicz+2013 (http://arxiv.org/abs/1309.4224)
"""
# 2010-09-02 11:40 IJC: Created
#2012-04-05 14:12 IJMC: Added retdat option
# 2012-04-06 18:51 IJMC: Added a kludgey way to work for 1D inputs
# 2012-08-09 11:39 IJMC: Now the 'n' option actually works.
if data.ndim==1:
data = np.tile(data, (3,1))
badmask = np.tile(badmask, (3,1))
ret = bfixpix(data, badmask, n=2, retdat=True)
return ret[1]
nx, ny = data.shape
badx, bady = np.nonzero(badmask)
nbad = len(badx)
if retdat:
data = np.array(data, copy=True)
for ii in range(nbad):
thisloc = badx[ii], bady[ii]
rad = 0
numNearbyGoodPixels = 0
while numNearbyGoodPixels<n:
rad += 1
xmin = max(0, badx[ii]-rad)
xmax = min(nx, badx[ii]+rad)
ymin = max(0, bady[ii]-rad)
ymax = min(ny, bady[ii]+rad)
x = np.arange(nx)[xmin:xmax+1]
y = np.arange(ny)[ymin:ymax+1]
yy,xx = np.meshgrid(y,x)
#print ii, rad, xmin, xmax, ymin, ymax, badmask.shape
rr = abs(xx + 1j*yy) * (1. - badmask[xmin:xmax+1,ymin:ymax+1])
numNearbyGoodPixels = (rr>0).sum()
closestDistances = np.unique(np.sort(rr[rr>0])[0:n])
numDistances = len(closestDistances)
localSum = 0.
localDenominator = 0.
for jj in range(numDistances):
localSum += data[xmin:xmax+1,ymin:ymax+1][rr==closestDistances[jj]].sum()
localDenominator += (rr==closestDistances[jj]).sum()
#print badx[ii], bady[ii], 1.0 * localSum / localDenominator, data[xmin:xmax+1,ymin:ymax+1]
data[badx[ii], bady[ii]] = 1.0 * localSum / localDenominator
if retdat:
ret = data
else:
ret = None
return ret | ae6b6c44e82dc70f998b31d9645cf74fef92c9fd | 10,619 |
import re
def parse_discount(element):
"""Given an HTML element, parse and return the discount."""
try:
# Remove any non integer characters from the HTML element
discount = re.sub("\D", "", element)
except AttributeError:
discount = "0"
return discount | 658f8a6bef8ba4bf82646a10c495904c03a717c7 | 10,620 |
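Two illustrative calls showing the digit stripping:
assert parse_discount("Save 25% today!") == "25"
assert parse_discount("$1,299.99 off") == "129999"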
import bisect
from collections import defaultdict
def read_files(allVCFs):
"""
Load all vcfs and count their number of entries
"""
# call exists in which files
call_lookup = defaultdict(list)
# total number of calls in a file
file_abscnt = defaultdict(float)
for vcfn in allVCFs:
v = parse_vcf(vcfn)
# disallow intra vcf duplicates
seen = {}
for entry in v:
key = entry_key(entry)
if key in seen:
continue
seen[key] = True
bisect.insort(call_lookup[key], vcfn)
file_abscnt[vcfn] += 1
return call_lookup, file_abscnt | 8518eac3c43772016fd5cbe0fd6c423a1e463ebc | 10,621 |
def parse_dat_file(dat_file):
"""
Parse a complete dat file.
dat files are transposed wrt the rest of the data formats here. In addition, they only contain integer fields,
so we can use np.loadtxt.
First 6 columns are ignored.
Note: must have a bims and info file to process completely.
Parameters
----------
dat_file: str
Path for dat file to process.
Returns
-------
data: array-like
"""
data = np.loadtxt(dat_file)
data = data[:, 6:].T
return data | 3b84730a347075c5be1e0ebe5a195338a86ed0c6 | 10,622 |
import argparse
import warnings
def parse_cmd_arguments(mode='split_audioset', default=False, argv=None):
"""Parse command-line arguments.
Args:
mode (str): The mode of the experiment.
default (optional): If True, command-line arguments will be ignored and
only the default values will be parsed.
argv (optional): If provided, it will be treated as a list of command-
line argument that is passed to the parser in place of sys.argv.
Returns:
The Namespace object containing argument names and values.
"""
description = 'Continual learning on Audioset task.'
parser = argparse.ArgumentParser(description=description)
dnum_tasks = 1
dnum_classes_per_task = 10
dval_set_size = 500
if mode == 'split_audioset':
dnum_tasks = 10
dnum_classes_per_task = 10
dval_set_size = 500
if mode == 'audioset':
dnum_tasks = 1
dnum_classes_per_task = 100
dval_set_size = 5000
cli.cl_args(parser, show_beta=True, dbeta=0.005,
show_from_scratch=True, show_multi_head=True,
show_split_head_cl3=False, show_cl_scenario=False,
show_num_tasks=True, dnum_tasks=dnum_tasks,
show_num_classes_per_task=True,
dnum_classes_per_task=dnum_classes_per_task)
cli.train_args(parser, show_lr=True, show_epochs=False,
dbatch_size=64, dn_iter=5000,
dlr=1e-3, show_clip_grad_value=False, show_clip_grad_norm=True,
show_momentum=False, show_adam_beta1=True)
seq.rnn_args(parser, drnn_arch='32', dnet_act='tanh')
cli.hypernet_args(parser, dhyper_chunks=-1, dhnet_arch='50,50',
dtemb_size=32, demb_size=32, dhnet_act='relu')
# Args of new hnets.
nhnet_args = cli.hnet_args(parser, allowed_nets=['hmlp', 'chunked_hmlp',
'structured_hmlp', 'hdeconv', 'chunked_hdeconv'], dhmlp_arch='50,50',
show_cond_emb_size=True, dcond_emb_size=32, dchmlp_chunk_size=1000,
dchunk_emb_size=32, show_use_cond_chunk_embs=True,
dhdeconv_shape='512,512,3', prefix='nh_',
pf_name='new edition of a hyper-', show_net_act=True, dnet_act='relu',
show_no_bias=True, show_dropout_rate=True, ddropout_rate=-1,
show_specnorm=True, show_batchnorm=False, show_no_batchnorm=False)
seq.new_hnet_args(nhnet_args)
cli.init_args(parser, custom_option=False, show_normal_init=False,
show_hyper_fan_init=True)
cli.eval_args(parser, dval_iter=250, show_val_set_size=True,
dval_set_size=dval_set_size)
magroup = cli.miscellaneous_args(parser, big_data=False,
synthetic_data=True, show_plots=True, no_cuda=True,
show_publication_style=False)
seq.ewc_args(parser, dewc_lambda=5000., dn_fisher=-1, dtbptt_fisher=-1,
dts_weighting_fisher='last')
seq.si_args(parser, dsi_lambda=1.)
seq.context_mod_args(parser, dsparsification_reg_type='l1',
dsparsification_reg_strength=1., dcontext_mod_init='constant')
seq.miscellaneous_args(magroup, dmask_fraction=0.8, dclassification=True,
dts_weighting='last', show_use_ce_loss=False)
# Replay arguments.
rep_args = seq.replay_args(parser)
cli.generator_args(rep_args, dlatent_dim=100)
cli.main_net_args(parser, allowed_nets=['simple_rnn'],
dsrnn_rec_layers='32', dsrnn_pre_fc_layers='',
dsrnn_post_fc_layers='',
show_net_act=True, dnet_act='tanh', show_no_bias=True,
show_dropout_rate=False, show_specnorm=False, show_batchnorm=False,
prefix='dec_', pf_name='replay decoder')
args = None
if argv is not None:
if default:
warnings.warn('Provided "argv" will be ignored since "default" ' +
'option was turned on.')
args = argv
if default:
args = []
config = parser.parse_args(args=args)
config.mode = mode
### Check argument values!
cli.check_invalid_argument_usage(config)
seq.check_invalid_args_sequential(config)
if config.train_from_scratch:
# FIXME We could get rid of this warning by properly checkpointing and
# loading all networks.
warnings.warn('When training from scratch, only during accuracies ' +
'make sense. All other outputs should be ignored!')
return config | e518cc9d9e643db349b9c3b7c34e6cfbd8ddc039 | 10,623 |
from hypothesis.provisional import domains
from hypothesis.strategies import builds, text
import string
def emails():
"""A strategy for generating email addresses as unicode strings. The
address format is specified in :rfc:`5322#section-3.4.1`. Values shrink
towards shorter local-parts and host domains.
This strategy is useful for generating "user data" for tests, as
mishandling of email addresses is a common source of bugs. Future
updates will generate more complicated addresses allowed by the RFC.
"""
local_chars = string.ascii_letters + string.digits + "!#$%&'*+-/=^_`{|}~"
local_part = text(local_chars, min_size=1, max_size=64)
# TODO: include dot-atoms, quoted strings, escaped chars, etc in local part
return builds(u'{}@{}'.format, local_part, domains()).filter(
lambda addr: len(addr) <= 255) | b6a07ee114827e48873ab4ea3dc6679c88ae1c00 | 10,624 |
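Typical use is as a strategy inside a property-based test, for example:
from hypothesis import given

@given(emails())
def test_email_shape(address):
    local, _, host = address.partition("@")
    assert local and host
    assert len(address) <= 255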
from typing import List
def next_whole_token(
wordpiece_subtokens,
initial_tokenizer,
subword_tokenizer):
"""Greedily reconstitutes a whole token from a WordPiece list.
This function assumes that the wordpiece subtokens were constructed correctly
from a correctly subtokenized CuBERT tokenizer, but the sequence may be
truncated and thus incomplete.
The implementation is done in two stages: recognizing the first whole token
and then finding the correspondence of that first whole token to a prefix of
the subtoken sequence.
The implementation assumes that untokenization can do the best job on the full
context. So, it first untokenizes the whole sequence, and chooses the first
whole token.
To figure out the subtoken prefix that corresponds to that whole token, the
implementation greedily untokenizes longer and longer subtoken prefixes, until
the whole token is recognized in the output.
The reason for this somewhat expensive implementation is that the logic for
merging subtokens (for WordPiece and then for CuBERT) is intricate, and does
not export how many initial subtokens were consumed for each output token of
the next higher abstraction. What's more, a subtoken may align itself with
the previous or the next whole token, when the subtoken sequence is
incomplete.
Args:
wordpiece_subtokens: The subtokens to scan through.
initial_tokenizer: A CuBERT tokenizer.
subword_tokenizer: A SubwordTextEncoder.
Returns:
The first whole token matched, and the end index of the first subtoken index
after the first whole token. wordpiece_subtokens[0:end_index] should be
the subtokens corresponding to the whole token returned.
Raises:
ValueError if no whole token can be parsed.
"""
wordpiece_ids = wordpiece_ids_from_wordpiece_tokens(wordpiece_subtokens,
subword_tokenizer)
full_cubert_subtokens: List[str] = (
subword_tokenizer._subtoken_ids_to_tokens( # pylint: disable=protected-access
wordpiece_ids))
full_cubert_subtokens.append(
unified_tokenizer.quote_special(unified_tokenizer.TokenKind.EOS.name))
full_whole_tokens = initial_tokenizer.untokenize_agnostic(
full_cubert_subtokens)
if len(full_whole_tokens) < 2:
# It all came out a jumble. Reject it.
raise ValueError(f'Whole tokens {full_whole_tokens} ended up '
f'undifferentiable in {wordpiece_subtokens}.')
whole_token = full_whole_tokens[0]
for end_index in range(1, len(wordpiece_ids) + 1):
prefix_list = wordpiece_ids[:end_index]
partial_cubert_subtokens: List[str] = (
subword_tokenizer._subtoken_ids_to_tokens( # pylint: disable=protected-access
prefix_list))
# We strip EOS in `code_to_cubert_sentences`, so we have to add it back
# here.
partial_cubert_subtokens.append(
unified_tokenizer.quote_special(unified_tokenizer.TokenKind.EOS.name))
partial_whole_tokens = initial_tokenizer.untokenize_agnostic(
partial_cubert_subtokens)
if len(partial_whole_tokens) > 1:
if partial_whole_tokens[0] == whole_token:
return whole_token, end_index
# We got here because we couldn't match the whole token we found from the
# full sequence
raise ValueError('Could not find a whole token in %r' %
(wordpiece_subtokens,)) | d26f4da0932030242c2209bc998bc32b6ce98cdf | 10,625 |
import os
def parse_tracks(filename="tracks.csv"):
"""
Builds the tracks matrix #tracks x #attributes (20635 x 4)
where attributes are track_id,album_id,artist_id,duration_sec
"""
with open(os.path.join(data_path, filename), "r") as f:
# Discard first line
lines = f.readlines()[1:]
num_lines = len(lines)
# Sanity check
assert num_lines == NUM_TRACKS
# Build matrices
album_set = sp.dok_matrix((NUM_ALBUMS, NUM_TRACKS), dtype=np.uint8)
artist_set = sp.dok_matrix((NUM_ARTISTS, NUM_TRACKS), dtype=np.uint8)
for i, line in enumerate(lines):
# Parse album and artist
track, album, artist, _ = [np.int32(i) for i in line.split(",")]
album_set[album, track] = 1
artist_set[artist, track] = 1
print("\rParsing tracks: {:.4}%".format((i / num_lines) * 100), end="")
print("\n")
return album_set, artist_set | 2734e7f509e208394049b149f88ec90cb35e1e57 | 10,626 |
def match_seq_len(*arrays: np.ndarray):
"""
Args:
*arrays:
Returns:
"""
max_len = np.stack([x.shape[-1] for x in arrays]).max()
return [np.pad(x, pad_width=((0, 0), (0, 0), (max_len - x.shape[-1], 0)), mode='constant', constant_values=0) for x
in arrays] | 2cd8715eb634e0b3604e1d5c305a5209bb0ae03d | 10,627 |
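A quick check of the padding behaviour on two 3-D arrays of different sequence length:
import numpy as np

a = np.ones((1, 2, 3))
b = np.ones((1, 2, 5))
padded = match_seq_len(a, b)
assert [x.shape for x in padded] == [(1, 2, 5), (1, 2, 5)]
assert (padded[0][..., :2] == 0).all()  # the shorter array is left-padded with zeros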
import torch
import math
def get_cmws_5_loss(
generative_model, guide, memory, obs, obs_id, num_particles, num_proposals, insomnia=1.0
):
"""Normalize over particles-and-memory for generative model gradient
Args:
generative_model
guide
memory
obs: tensor of shape [batch_size, *obs_dims]
obs_id: long tensor of shape [batch_size]
num_particles (int): number of particles used to marginalize continuous latents
num_proposals (int): number of proposed elements to be considered as new memory
Returns: [batch_size]
"""
# Extract
batch_size = obs.shape[0]
# SAMPLE d'_{1:R} ~ q(d | x)
# [num_proposals, batch_size, ...]
proposed_discrete_latent = guide.sample_discrete(obs, (num_proposals,))
# ASSIGN d_{1:(R + M)} = CONCAT(d'_{1:R}, d_{1:M})
# [memory_size + num_proposals, batch_size, ...]
discrete_latent_concat = cmws.memory.concat(memory.select(obs_id), proposed_discrete_latent)
# COMPUTE SCORES s_i = log p(d_i, x) for i {1, ..., (R + M)}
# -- c ~ q(c | d, x)
# [num_particles, memory_size + num_proposals, batch_size, ...]
_continuous_latent = guide.sample_continuous(obs, discrete_latent_concat, [num_particles])
# -- log q(c | d)
# [num_particles, memory_size + num_proposals, batch_size]
_log_q_continuous = guide.log_prob_continuous(obs, discrete_latent_concat, _continuous_latent)
# -- log p(d, c, x)
# [num_particles, memory_size + num_proposals, batch_size]
_log_p = generative_model.log_prob_discrete_continuous(
discrete_latent_concat, _continuous_latent, obs
)
# [memory_size + num_proposals, batch_size]
log_marginal_joint = torch.logsumexp(_log_p - _log_q_continuous, dim=0) - math.log(
num_particles
)
# ASSIGN d_{1:M} = TOP_K_UNIQUE(d_{1:(R + M)}, s_{1:(R + M)})
# [memory_size, batch_size, ...], [memory_size, batch_size]
discrete_latent_selected, _, indices = cmws.memory.get_unique_and_top_k(
discrete_latent_concat, log_marginal_joint, memory.size, return_indices=True
)
# SELECT log q(c | d, x) and log p(d, c, x)
# [num_particles, memory_size, batch_size]
_log_q_continuous = torch.gather(
_log_q_continuous, 1, indices[None].expand(num_particles, memory.size, batch_size)
)
# [num_particles, memory_size, batch_size]
_log_p = torch.gather(_log_p, 1, indices[None].expand(num_particles, memory.size, batch_size))
# COMPUTE WEIGHT
# [num_particles, memory_size, batch_size]
_log_weight = _log_p - _log_q_continuous
# COMPUTE log q(d_i | x) for i in {1, ..., M}
# [memory_size, batch_size]
_log_q_discrete = guide.log_prob_discrete(obs, discrete_latent_selected,)
# UPDATE MEMORY with d_{1:M}
memory.update(obs_id, discrete_latent_selected)
# CHECK UNIQUE
# if not memory.is_unique(obs_id).all():
# raise RuntimeError("memory not unique")
# COMPUTE losses
# --Compute generative model loss
# [num_particles, memory_size, batch_size]
_log_weight_v = torch.softmax(_log_weight.view(-1, batch_size), dim=0).view(
num_particles, memory.size, batch_size
)
# [batch_size]
generative_model_loss = -(_log_weight_v.detach() * _log_p).sum(dim=[0, 1])
# --Compute guide loss
# ----Compute guide wake loss
batch_size = obs.shape[0]
if insomnia < 1.0:
# [batch_size]
guide_loss_sleep = (
get_sleep_loss(generative_model, guide, num_particles * batch_size)
.view(batch_size, num_particles)
.mean(-1)
)
# ----Compute guide CMWS loss
if insomnia > 0.0:
# [memory_size, batch_size]
_log_weight_omega = torch.logsumexp(_log_weight_v, dim=0)
# [batch_size]
discrete_guide_loss_cmws = -(_log_weight_omega.detach() * _log_q_discrete).sum(dim=0)
# [batch_size]
continuous_guide_loss_cmws = -(
(torch.softmax(_log_weight, dim=0).detach() * _log_q_continuous).sum(dim=0).mean(dim=0)
)
# [batch_size]
guide_loss_cmws = discrete_guide_loss_cmws + continuous_guide_loss_cmws
# ----Combine guide sleep and CMWS losses
if insomnia == 0.0:
guide_loss = guide_loss_sleep
elif insomnia == 1.0:
guide_loss = guide_loss_cmws
else:
guide_loss = insomnia * guide_loss_cmws + (1 - insomnia) * guide_loss_sleep
return generative_model_loss + guide_loss | 5e2b87a7d19eab1f09e5207f4c16c8e4a56b2225 | 10,628 |
import sys
import os
def chemin_absolu(relative_path):
"""
Return the absolute path of a file.
PRE : -
POST : Returns 'C:\\Users\\sacre\\PycharmProjects\\ProjetProgra\\' + 'relative_path'.
"""
base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
correct = base_path.index("ProjetProgra")
return os.path.join(base_path[:correct + 13], relative_path) | 15f69eed0bbdb9ba09375d1485daba6a4fa8097f | 10,629 |
import torch
def to_float_tensor(np_array):
"""
Convert a NumPy array to a float torch tensor.
:param np_array: input NumPy array
:return: a float32 torch.Tensor with the same values
"""
return torch.from_numpy(np_array).type(torch.float) | 84512d8383999bf22841c0e7e1fc8048bcba9a1a | 10,630 |
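For example:
import numpy as np

t = to_float_tensor(np.array([1, 2, 3]))
assert t.dtype == torch.float32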
def display_heatmap(salience_scores,
salience_scores_2=None,
title=None,
title_2=None,
cell_labels=None,
cell_labels_2=None,
normalized=True,
ui=False):
"""
A utility function that displays a Seaborn heatmap.
Input:
- ('salience_scores') A list of floats.
If task is something like NLI, then these are the salience scores for the premise, or first
sequence.
- ('salience_scores_2') A list of floats.
Optional. Only necessary when task is a relation labeling task between 2 sequences
like NLI. Then these are the salience scores for the hypothesis, or second sequence.
- ('title') Any object (string, integer, float, etc.) that can be printed.
Optional.
Usually is descriptive blurb for the heatmap for ('salience_scores')
- ('title_2') Any object (string, integer, float, etc.) that can be printed.
Optional. Usually is descriptive blurb for the heatmap for ('salience scores_2')
- ('cell_labels') Optional. list of the same size as ('salience_scores') that is printed
on the corresponding cell. Usually something like salience score values.
- ('cell_labels_2') Optional. list of the same size as ('salience_scores_2') that is printed
on the corresponding cell. Usually something like salience score values.
- ('normalized') A boolean denoting whether the data is normalized or not. If normalized,
the range is from -1 to 1.
- ('ui') A boolean for option of saving the plot instead to a file and returning the filename
Output:
- Return the matplotlib object
"""
if cell_labels is not None:
assert len(cell_labels) == len(salience_scores)
if cell_labels_2 is not None:
assert len(cell_labels_2) == len(salience_scores_2)
cmap = sns.diverging_palette(10, 240, as_cmap=True)
if salience_scores_2 is not None:
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 12))
ax1.set_title(title if title is not None else "")
ax2.set_title(title_2 if title_2 is not None else "")
sns.heatmap([salience_scores],
ax=ax1,
annot=[cell_labels] if cell_labels is not None else False,
fmt='',
cmap=cmap,
linewidths=0.5,
square=True,
center=0,
vmin=-1 if normalized else None,
vmax=1 if normalized else None)
sns.heatmap([salience_scores_2],
ax=ax2,
annot=[cell_labels_2] if cell_labels_2 is not None else False,
fmt='',
cmap=cmap,
linewidths=0.5,
square=True,
center=0,
vmin=-1 if normalized else None,
vmax=1 if normalized else None)
else:
m = sns.heatmap([salience_scores],
annot=[cell_labels] if cell_labels is not None else False,
fmt='',
linewidths=0.5,
square=True,
cmap=cmap,
center=0,
vmin=-1 if normalized else None,
vmax=1 if normalized else None)
plt.title(title if title is not None else "")
#plt.show()
return plt | 8229db9630c8553567f0f93f8320c71397180ced | 10,631 |
import re
def _cleanse_line(line, main_character):
"""
Cleanse the extracted lines to remove formatting.
"""
# Strip the line, just in case.
line = line.strip()
# Clean up formatting characters.
line = line.replace('\\' , '') # Remove escape characters.
line = line.replace('[mc]', main_character) # Standardize MC name.
line = re.sub(r'{/?i}' , '*', line) # Convert italics to Markdown.
line = re.sub(r'{cps=\d+}', '' , line) # Remove scroll speed formatting.
return line | 87177c557ab89b77c63cc1df10874e52606258a7 | 10,632 |
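An illustrative before/after for a single line, with main_character set to "Alex":
raw = r"Hi, [mc]! {i}Nice{/i} to meet you.{cps=20}"
assert _cleanse_line(raw, "Alex") == "Hi, Alex! *Nice* to meet you."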
from functools import wraps
def require_pandapower(f):
"""
Decorator for functions that require pandapower.
"""
@wraps(f)
def wrapper(*args, **kwds):
try:
getattr(pp, '__version__')
except AttributeError:
raise ModuleNotFoundError("pandapower needs to be manually installed.")
return f(*args, **kwds)
return wrapper | 35b0e5a5f9c4e189d849e3a6ba843b6f9e6b49b1 | 10,633 |
def optimal_path_fixture():
"""An optimal path, and associated distance, along the nodes of the pyramid"""
return [0, 1, 2, 3], 10 + 2 + 5 | 10c4e436907ecb99740a2514c927f05fd8488cf4 | 10,634 |
import timeit
def evaluate_DynamicHashtablePlusRemove(output=True):
"""
Compare performance using ability in open addressing to mark deleted values.
Nifty trick to produce just the squares as keys in the hashtable.
"""
# If you want to compare, then add following to end of executable statements:
# print([e[0] for e in ht])
tbl = DataTable([8,20,20], ['M', 'Separate Chaining', 'Open Addressing w/ Remove'], output=output)
for size in [512, 1024, 2048]:
linked_list = min(timeit.repeat(stmt='''
ht = Hashtable({0})
N = {0} // 4
for i in range(1, N, 1):
flip_every_k(ht, i, N)'''.format(size), setup='''
from ch03.hashtable_linked import Hashtable
from ch03.challenge import flip_every_k''', repeat=7, number=5))/5
hashtable_plus = min(timeit.repeat(stmt='''
ht = DynamicHashtablePlusRemove({0})
N = {0} // 4
for i in range(1, N, 1):
flip_every_k(ht, i, N)'''.format(size), setup='''
from ch03.hashtable_open import DynamicHashtablePlusRemove
from ch03.challenge import flip_every_k''', repeat=7, number=5))/5
tbl.row([size, linked_list, hashtable_plus])
return tbl | 243b5f4972b8eaa2630b6920c2d640d729feae61 | 10,635 |
def closest(lat1, lon1):
"""Return distance (km) and city closest to given coords."""
lat1, lon1 = float(lat1), float(lon1)
min_dist, min_city = None, None
for city, lat2, lon2 in CITIES:
dist = _dist(lat1, lon1, lat2, lon2)
if min_dist is None or dist < min_dist:
min_dist, min_city = dist, city
return min_dist, min_city | 4227e357f41619b6e2076bdcf3bb67b92daa9c4a | 10,636 |
def get_previous_term():
"""
Returns a uw_sws.models.Term object,
for the previous term.
"""
url = "{}/previous.json".format(term_res_url_prefix)
return Term(data=get_resource(url)) | a261bc9d744f8f0b70ac76ac596f922b63ea9a46 | 10,637 |
from typing import TypeVar
T = TypeVar("T")
def used(obj: T) -> T:
"""Decorator indicating that an object is being used.
This stops the UnusedObjectFinder from marking it as unused.
"""
_used_objects.add(obj)
return obj | 33d241fe4a0953352ecad2ba306f915a88500d46 | 10,638 |
import scipy.optimize
def fit_double_gaussian(x_data, y_data, maxiter=None, maxfun=5000, verbose=1, initial_params=None):
""" Fitting of double gaussian
Fitting the Gaussians and finding the split between the up and the down state,
separation between the max of the two gaussians measured in the sum of the std.
Args:
x_data (array): x values of the data
y_data (array): y values of the data
maxiter (int): maximum number of iterations to perform
maxfun (int): maximum number of function evaluations to make
verbose (int): set to >0 to print convergence messages
initial_params (None or array): optional, initial guess for the fit parameters:
[A_dn, A_up, sigma_dn, sigma_up, mean_dn, mean_up]
Returns:
par_fit (array): fit parameters of the double gaussian: [A_dn, A_up, sigma_dn, sigma_up, mean_dn, mean_up]
initial_params (array): initial guess for the fit parameters, either the ones give to the function, or generated by the function: [A_dn, A_up, sigma_dn, sigma_up, mean_dn, mean_up]
separation (float): separation between the max of the two gaussians measured in the sum of the std
split (float): value that separates the up and the down level
"""
def func(params): return _cost_double_gaussian(x_data, y_data, params)
maxsignal = np.percentile(x_data, 98)
minsignal = np.percentile(x_data, 2)
if initial_params is None:
A_dn = np.max(y_data[:int((len(y_data) / 2))])
A_up = np.max(y_data[int((len(y_data) / 2)):])
sigma_dn = (maxsignal - minsignal) * 1 / 20
sigma_up = (maxsignal - minsignal) * 1 / 20
mean_dn = minsignal + 1 / 4 * (maxsignal - minsignal)
mean_up = minsignal + 3 / 4 * (maxsignal - minsignal)
initial_params = np.array([A_dn, A_up, sigma_dn, sigma_up, mean_dn, mean_up])
par_fit = scipy.optimize.fmin(func, initial_params, maxiter=maxiter, maxfun=maxfun, disp=verbose >= 2)
# separation is the difference between the max of the gaussians divided by the sum of the std of both gaussians
separation = (par_fit[5] - par_fit[4]) / (abs(par_fit[2]) + abs(par_fit[3]))
# split is equidistant from both peaks, measured in std from the peak
split = par_fit[4] + separation * abs(par_fit[2])
result_dict = {'parameters initial guess': initial_params, 'separation': separation, 'split': split}
return par_fit, result_dict | 65a54120e2d244301d36d0bba1e25fc711a9d6bb | 10,639 |
def _set_advanced_network_attributes_of_profile(config, profile):
"""
Modify advanced network attributes of profile.
@param config: current configparser configuration.
@param profile: the profile to set the attribute in.
@return: configparser configuration.
"""
config = _set_attribute_of_profile(
config, profile, 'vpc_offering_id', 'VPC offering id', ''
)
return config | f5254f5f055865bf43f0e97f2dcf791bbbe61011 | 10,640 |
import os
def Environ(envstring):
"""Return the String associated with an operating system environment variable
envstring Optional. String expression containing the name of an environment variable.
number Optional. Numeric expression corresponding to the numeric order of the
environment string in the environment-string table. The number argument can be any
numeric expression, but is rounded to a whole number before it is evaluated.
Remarks
If envstring can't be found in the environment-string table, a zero-length string ("")
is returned. Otherwise, Environ returns the text assigned to the specified envstring;
that is, the text following the equal sign (=) in the environment-string table for that environment variable.
"""
try:
envint = int(envstring)
except ValueError:
return os.environ.get(envstring, "")
# Is an integer - need to get the envint'th value
try:
return "%s=%s" % (list(os.environ.keys())[envint], list(os.environ.values())[envint])
except IndexError:
return "" | 9972a427017dcae2917ea01d679d3fbc89ced0a7 | 10,641 |
def ring_bond_equal(b1, b2, reverse=False):
"""Check if two bonds are equal.
Two bonds are equal if their beginning and end atoms have the same symbol and
formal charge. Bond type not considered because all aromatic (so SINGLE matches DOUBLE).
Parameters
----------
b1 : rdkit.Chem.rdchem.Bond
An RDKit bond object.
b2 : rdkit.Chem.rdchem.Bond
An RDKit bond object.
reverse : bool
Whether to interchange the role of beginning and end atoms of the second
bond in comparison.
Returns
-------
bool
Whether the two bonds are equal.
"""
b1 = (b1.GetBeginAtom(), b1.GetEndAtom())
if reverse:
b2 = (b2.GetEndAtom(), b2.GetBeginAtom())
else:
b2 = (b2.GetBeginAtom(), b2.GetEndAtom())
return atom_equal(b1[0], b2[0]) and atom_equal(b1[1], b2[1]) | e0c5ab25d69f5770dcf58dd284519b3ed593ad33 | 10,642 |
import _warnings
def survey_aligned_velocities(od):
"""
Compute horizontal velocities orthogonal and tangential to a survey.
.. math::
(v_{tan}, v_{ort}) = (u\\cos{\\phi} + v\\sin{\\phi},
v\\cos{\\phi} - u\\sin{\\phi})
Parameters
----------
od: OceanDataset
oceandataset used to compute
Returns
-------
ds: xarray.Dataset
| rot_ang_Vel: Angle to rotate geographical
to survey aligned velocities
| tan_Vel: Velocity component tangential to survey
| ort_Vel: Velocity component orthogonal to survey
See Also
--------
subsample.survey_stations
"""
# Check parameters
_check_instance({'od': od}, 'oceanspy.OceanDataset')
if 'station' not in od._ds.dims:
raise ValueError('oceandatasets must be subsampled using'
' `subsample.survey_stations`')
# Get zonal and meridional velocities
var_list = ['lat', 'lon']
try:
# Add missing variables
varList = ['U_zonal', 'V_merid'] + var_list
od = _add_missing_variables(od, varList)
# Extract variables
U = od._ds['U_zonal']
V = od._ds['V_merid']
except Exception as e:
# Assume U=U_zonal and V=V_zonal
_warnings.warn(("\n{}"
"\nAssuming U=U_zonal and V=V_merid."
"\nIf you are using curvilinear coordinates,"
" run `compute.geographical_aligned_velocities`"
" before `subsample.survey_stations`").format(e),
stacklevel=2)
# Add missing variables
varList = ['U', 'V'] + var_list
od = _add_missing_variables(od, varList)
# Extract variables
U = od._ds['U']
V = od._ds['V']
# Extract varibles
lat = _np.deg2rad(od._ds['lat'])
lon = _np.deg2rad(od._ds['lon'])
# Extract grid
grid = od._grid
# Message
print('Computing survey aligned velocities.')
# Compute azimuth
# Translated from matlab:
# https://www.mathworks.com/help/map/ref/azimuth.html
az = _np.arctan2(_np.cos(lat[1:]).values
* _np.sin(grid.diff(lon, 'station')),
_np.cos(lat[:-1]).values * _np.sin(lat[1:]).values
- _np.sin(lat[:-1]).values
* _np.cos(lat[1:]).values
* _np.cos(grid.diff(lon, 'station')))
az = grid.interp(az, 'station', boundary='extend')
az = _xr.where(_np.rad2deg(az) < 0, _np.pi*2 + az, az)
# Compute rotation angle
rot_ang_rad = _np.pi/2 - az
rot_ang_rad = _xr.where(rot_ang_rad < 0,
_np.pi*2 + rot_ang_rad, rot_ang_rad)
rot_ang_deg = _np.rad2deg(rot_ang_rad)
rot_ang_Vel = rot_ang_deg
long_name = 'Angle to rotate geographical to survey aligned velocities'
rot_ang_Vel.attrs['long_name'] = long_name
rot_ang_Vel.attrs['units'] = 'deg (+: counterclockwise)'
# Rotate velocities
tan_Vel = U*_np.cos(rot_ang_rad) + V*_np.sin(rot_ang_rad)
tan_Vel.attrs['long_name'] = 'Velocity component tangential to survey'
if 'units' in U.attrs:
units = U.attrs['units']
else:
units = ' '
tan_Vel.attrs['units'] = ('{} '
'(+: flow towards station indexed'
' with higher number)'
''.format(units))
ort_Vel = V*_np.cos(rot_ang_rad) - U*_np.sin(rot_ang_rad)
ort_Vel.attrs['long_name'] = 'Velocity component orthogonal to survey'
if 'units' in V.attrs:
units = V.attrs['units']
else:
units = ' '
ort_Vel.attrs['units'] = ('{} '
'(+: flow keeps station indexed'
' with higher number to the right)'
''.format(units))
# Create ds
ds = _xr.Dataset({'rot_ang_Vel': rot_ang_Vel,
'ort_Vel': ort_Vel,
'tan_Vel': tan_Vel}, attrs=od.dataset.attrs)
return _ospy.OceanDataset(ds).dataset | c506f8ca5db1ed6045ac02fb1988900f0ae10451 | 10,643 |
def insertion_sort(numbers):
"""
At worst this is an O(n^2) algorithm
At best this is an O(n) algorithm
"""
for index in range(1, len(numbers)):
current_num = numbers[index]
current_pos = index
while current_pos > 0 and numbers[current_pos - 1] > current_num:
numbers[current_pos] = numbers[current_pos - 1]
current_pos = current_pos - 1
numbers[current_pos] = current_num
return numbers | d32a73b156f8b469cfcbdda70f349c7f3173d6a9 | 10,644 |
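Example usage (the list is sorted in place and also returned):
assert insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]
assert insertion_sort([]) == []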
def ratio_shimenreservoir_to_houchiweir():
"""
Real Name: Ratio ShiMenReservoir To HouChiWeir
Original Eqn: Sum Allocation ShiMenReservoir To HouChiWeir/Sum Allcation From ShiMenReservoir
Units: m3/m3
Limits: (None, None)
Type: component
"""
return sum_allocation_shimenreservoir_to_houchiweir() / sum_allcation_from_shimenreservoir() | e49969f53d6641a02b6cea5d3010ac34eb0739fd | 10,645 |
import torch
from functools import wraps
def torch_profiler_full(func):
"""
A decorator which will run the torch profiler for the decorated function,
printing the results in full.
Note: Enforces a gpu sync point which could slow down pipelines.
"""
@wraps(func)
def wrapper(*args, **kwargs):
with torch.autograd.profiler.profile(use_cuda=True) as prof:
result = func(*args, **kwargs)
print(prof, flush=True)
return result
return wrapper | 7a92eb75d0131c6d151c9908fdcf2e84f6499468 | 10,646 |
def bias_add(x, bias, data_format=None):
"""Adds a bias vector to a tensor.
# Arguments
x: Tensor or variable.
bias: Bias tensor to add.
data_format: string, `"channels_last"` or `"channels_first"`.
# Returns
Output tensor.
# Raises
ValueError: In one of the two cases below:
1. invalid `data_format` argument.
2. invalid bias shape.
the bias should be either a vector or
a tensor with ndim(x) - 1 dimension
"""
data_format = normalize_data_format(data_format)
bias_shape = int_shape(bias)
if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
raise ValueError('Unexpected bias dimensions %d, '
'expect to be 1 or %d dimensions'
% (len(bias_shape), ndim(x)))
if ndim(x) == 5:
if len(bias_shape) == 1:
new_shape = (1, 1, 1, 1, bias_shape[0])
else:
new_shape = (1,) + bias_shape
new_shape = transpose_shape(new_shape, data_format, spatial_axes=(1, 2, 3))
x += reshape(bias, new_shape)
elif ndim(x) == 4:
if data_format == 'channels_first':
if len(bias_shape) == 1:
if _has_nchw_support():
x = tf.nn.bias_add(x, bias,
data_format='NCHW')
else:
x += reshape(bias, (1, bias_shape[0], 1, 1))
else:
x += reshape(bias, (1, bias_shape[2]) + bias_shape[:2])
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = tf.nn.bias_add(x, bias,
data_format='NHWC')
else:
x += reshape(bias, (1,) + bias_shape)
elif ndim(x) == 3:
if len(bias_shape) == 1:
new_shape = (1, 1, bias_shape[0])
else:
new_shape = (1,) + bias_shape
new_shape = transpose_shape(new_shape, data_format, spatial_axes=(1,))
x += reshape(bias, new_shape)
else:
x = tf.nn.bias_add(x, bias)
return x | 1b783bbd6f685be336b565d7e5db9c5aa91a1f16 | 10,647 |
def get_news_with_follow(request, user_id):
"""
Get the latest 30 news articles of the types the user follows; returns 300 if the user is not logged in.
:param request: the request object
:return: JSON data
"""
data = {}
try:
user = User.objects.get(pk=user_id)
follow_set = user.follow_type.values_list('id').all()
follow_list = [x[0] for x in follow_set]
news_set = NewsArticle.objects.filter(type_id__in=follow_list).order_by('-publish_time')[:30]
except db.Error:
data['code'] = 400
data['msg'] = '服务器忙,请稍后再试'
return JsonResponse(data)
except ObjectDoesNotExist:
data['code'] = 505
data['msg'] = '用户不存在'
return JsonResponse(data)
news_list = []
for news in news_set:
item = {
'id': news.id,
'title': news.title,
'type': news.type.name,
'publish_time': news.publish_time
}
news_list.append(item)
data['code'] = 200
data['msg'] = '请求成功'
data['news_list'] = news_list
return JsonResponse(data) | 41e9c8cb20c9c1757a8633d584f738b6a64e4f2b | 10,648 |
import os
def get_ngd_dir(config, absolute = False):
"""Returns the ngd output directory location
Args:
config (dictionary): configuration dictionary
absolute (boolean):
False (default): Relative to project base
True: Absolute
Returns:
(string): string representation of the path to the output
Raises:
Nothing
"""
build_dir = utils.get_build_directory(config, absolute)
ngd_dir = os.path.join(build_dir, NGD_DIR)
return ngd_dir | 1d3ccdc83eaa1b29a8dc86ed0ad9150510ab1398 | 10,649 |
def trigger():
"""Trigger salt-api call."""
data = {'foo': 'bar'}
return request('/hook/trigger', data=data) | 6aa469468711c3c94e0b5a20d9825fc9c0a73d83 | 10,650 |
from typing import List
import math
def fitness_function(cams: List[Coord], pop: List[Coord]) -> int:
"""
Function to calculate number of surveilled citizens.
For each citizen, check whether any camera can see them; if at least one can, the score increases.
"""
score = []
for cit in pop:
test = False
for cam in cams:
if (
math.sqrt(((cam[0] - cit[0]) ** 2) + ((cam[1] - cit[1]) ** 2))
<= view_radius
):
test = True
score.append(test)
return score.count(True) | d10c02a7b182a8c38d8db37f13aec5b4c9def593 | 10,651 |
import requests
def scrape_opening_hours():
""""scrape opening hours from https://www.designmuseumgent.be/bezoek"""
r = requests.get("https://www.designmuseumgent.be/bezoek")
data = r.text
return data | 297a35f3bc4e10d453da495e031fae5ce79ca643 | 10,652 |
import torch
def _demo_mm_inputs(input_shape=(1, 3, 256, 256)):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
"""
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
target = np.zeros([N, 17, H // 32, W // 32], dtype=np.float32)
mask = np.ones([N, H // 32, W // 32], dtype=np.float32)
joints = np.zeros([N, 30, 17, 2], dtype=np.float32)
img_metas = [{
'image_file':
'test.jpg',
'aug_data': [torch.zeros(1, 3, 256, 256)],
'test_scale_factor': [1],
'base_size': (256, 256),
'center':
np.array([128, 128]),
'scale':
np.array([1.28, 1.28]),
'flip_index':
[0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
} for _ in range(N)]
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'target': [torch.FloatTensor(target)],
'mask': [torch.FloatTensor(mask)],
'joints': [torch.FloatTensor(joints)],
'img_metas': img_metas
}
return mm_inputs | abef4e006fe6e530c5ca372904a40eecc3dbb5b7 | 10,653 |
import random
def compute_one_epoch_baseline():
"""
Function to compute the performance of a simple one epoch baseline.
:return: a line to display (string reporting the experiment results)
"""
best_val_obj_list = []
total_time_list = []
for nb201_random_seed in nb201_random_seeds:
for random_seed in random_seeds:
# randomly sample 256 configurations for the given dataset and NASBench201 seed
# use the same seeds as for our other experiments
random.seed(random_seed)
cfg_list = random.sample(
range(len(df_dict[nb201_random_seed][dataset_name])), 256
)
selected_subset = df_dict[nb201_random_seed][dataset_name].iloc[cfg_list]
# find configuration with the best performance after doing one epoch
max_idx = selected_subset["val_acc_epoch_0"].argmax()
best_configuration = selected_subset.iloc[max_idx]
# find the best validation accuracy of the selected configuration
# as that is the metric that we compare
best_val_obj = best_configuration[epoch_names].max()
# we also need to calculate the time it took for this
# taking into account the number of workers
total_time = selected_subset["eval_time_epoch"].sum() / n_workers
best_val_obj_list.append(best_val_obj)
total_time_list.append(total_time)
line = " & {:.2f} $\pm$ {:.2f}".format(
np.mean(best_val_obj_list), np.std(best_val_obj_list)
)
line += " & {:.1f}h $\pm$ {:.1f}h".format(
np.mean(total_time_list) / 3600, np.std(total_time_list) / 3600
)
line += " & {:.1f}x".format(reference_time / np.mean(total_time_list))
line += " & 1.0 $\pm$ 0.0"
return line | 1bc3b03d49f0bbb8e2213acb31c64367b577aed2 | 10,654 |
import string
import random
def generate_random_string( length ):
"""Generate a random string of a given length containing uppercase and lowercase letters, digits and ASCII punctuation."""
source = string.ascii_lowercase + string.ascii_uppercase + string.digits + string.punctuation
return ''.join( random.choice( source ) for i in range( length ) ) | 9bb1ee7e21f27231e498f48bff505d963565f582 | 10,655 |
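For example:
token = generate_random_string(16)
assert len(token) == 16
assert all(c in string.ascii_letters + string.digits + string.punctuation for c in token)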
import shlex
import psutil
def start_cmd(cmd, use_file=False):
"""Start command and returns proc instance from Popen."""
orig_cmd = ""
# Multi-commands need to be written to a temporary file to execute on Windows.
# This is due to complications with invoking Bash in Windows.
if use_file:
orig_cmd = cmd
temp_file = create_temp_executable_file(cmd)
# The temporary file name will have '\' on Windows and needs to be converted to '/'.
cmd = "bash -c {}".format(temp_file.replace("\\", "/"))
# If 'cmd' is specified as a string, convert it to a list of strings.
if isinstance(cmd, str):
cmd = shlex.split(cmd)
if use_file:
LOGGER.debug("Executing '%s', tempfile contains: %s", cmd, orig_cmd)
else:
LOGGER.debug("Executing '%s'", cmd)
# We use psutil.Popen() rather than subprocess.Popen() in order to cache the creation time of
# the process. This enables us to reliably detect pid reuse in kill_process().
proc = psutil.Popen(cmd, close_fds=True)
LOGGER.debug("Spawned process %s pid %d", proc.name(), proc.pid)
return proc | bdb6c5eef6a9e0a2fc2da16bfa88737410be477a | 10,656 |
from typing import Mapping
from typing import Sequence
def pretty_table(rows, header=None):
"""
Returns a string with a simple pretty table representing the given rows.
Rows can be:
- Sequences such as lists or tuples
- Mappings such as dicts
- Any object with a __dict__ attribute (most plain python objects) which is
equivalent to passing the __dict__ directly.
If no header is given then either all or none of the rows must be sequences
to ensure the correct order. If there are no sequences then the header will be
derived from the keys of the mappings.
>>> print(pretty_table([['a', 'hello', 'c', 1], ['world', 'b', 'd', 2]]))
a | hello | c | 1
world | b | d | 2
>>> print(pretty_table([['a', 'hello', 'c', 1], ['world', 'b', 'd', 2]], header='col1 col2 col3 col4'))
col1 | col2 | col3 | col4
---------------------------
a | hello | c | 1
world | b | d | 2
>>> print(pretty_table([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]))
a | b
-----
1 | 2
3 | 4
>>> class C(object):
... def __init__(self, a, b):
... self.a = a
... self.b = b
...
>>> print(pretty_table([{'a': 1, 'b': 2}, C(3, 4), [5, 6]], header=['b', 'a']))
b | a
-----
2 | 1
4 | 3
5 | 6
>>> print(pretty_table([{'a': 1, 'b': 2}, C(3, 4), [5, 6]]))
Traceback (most recent call last):
...
ValueError: Cannot mix sequences and other types of rows without specifying a header
>>> print(pretty_table([[1, 2], [3, 4, 5]]))
Traceback (most recent call last):
...
ValueError: Mismatched lengths.
First row (len = 2):
[1, 2]
Current row (len = 3):
[3, 4, 5]
>>> print(pretty_table([{'a': 1, 'b': 2}], header='c d'))
Traceback (most recent call last):
....
KeyError: "Tried to access 'c', only keys are: ['a', 'b']"
"""
rows2 = []
if header:
header = ensure_list_if_string(header)
rows2.insert(0, header)
row_type = ['any']
else:
header = []
row_type = [None]
def require_type(t):
if row_type[0] not in (None, t, 'any'):
raise ValueError('Cannot mix sequences and other types of rows without specifying a header')
if row_type[0] is None:
row_type[0] = t
def handle_dict(d):
require_type('mapping')
if not header:
header[:] = sorted(d.keys())
rows2.insert(0, header)
return [helpful_error_dict_get(d, key) for key in header]
for row in rows:
if isinstance(row, Mapping):
row = handle_dict(row)
elif isinstance(row, Sequence):
require_type('sequence')
if rows2 and len(row) != len(rows2[0]):
raise ValueError('Mismatched lengths.\n'
'First row (len = %s):\n%s\n'
'Current row (len = %s):\n%s' %
(len(rows2[0]), rows2[0], len(row), row))
else:
row = handle_dict(row.__dict__)
rows2.append(row)
rows = [[str(cell) for cell in row] for row in rows2]
widths = [max(len(row[i]) for row in rows) for i in range(len(rows[0]))]
lines = [' | '.join(cell.ljust(width) for cell, width in zip(row, widths)).strip()
for row in rows]
if header:
lines.insert(1, '-' * len(lines[0]))
return '\n'.join(lines) | 1b4707932b27277ef22f17631e7a5778a38f99eb | 10,657 |
def interpolate_trajectory(world_map, waypoints_trajectory, hop_resolution=1.0):
"""
    Given some raw keypoints, interpolate a full dense trajectory to be used by the user.
    Args:
        world_map: a reference to the CARLA world map so we can use the planner
waypoints_trajectory: the current coarse trajectory
hop_resolution: is the resolution, how dense is the provided trajectory going to be made
Return:
route: full interpolated route both in GPS coordinates and also in its original form.
"""
dao = GlobalRoutePlannerDAO(world_map, hop_resolution)
grp = GlobalRoutePlanner(dao)
grp.setup()
# Obtain route plan
route = []
for i in range(len(waypoints_trajectory) - 1): # Goes until the one before the last.
waypoint = waypoints_trajectory[i]
waypoint_next = waypoints_trajectory[i + 1]
interpolated_trace = grp.trace_route(waypoint, waypoint_next)
for wp_tuple in interpolated_trace:
route.append((wp_tuple[0].transform, wp_tuple[1]))
return route | df544616954868aaa25c86b50420202bea860d9b | 10,658 |
def import_data(filepath="/home/vagrant/countries/NO.txt", mongodb_url="mongodb://localhost:27017"):
"""
    Import the address data into MongoDB
CLI Example:
salt '*' mongo.import_data /usr/data/EN.txt
"""
client = MongoClient(mongodb_url)
db = client.demo
address_col = db.address
#Delete collection if present
print("Dropping collection of addresses")
address_col.delete_many({})
#Create compound indices for full text search
address_col.create_index([
("country_code", TEXT),
("postal_code", TEXT),
("place_name", TEXT),
("admin_name1", TEXT),
("admin_name2", TEXT),
("admin_name3", TEXT),
])
# Split line on the tab character since this is the delimiter.
for line in _read_file(filepath):
parts = line.split("\t")
if parts and len(parts) >= 12:
address = {
"country_code": parts[0],
"postal_code": parts[1],
"place_name": parts[2],
"admin_name1": parts[3],
"admin_code1": parts[4],
"admin_name2": parts[5],
"admin_code2": parts[6],
"admin_name3": parts[7],
"admin_code3": parts[8],
"latitude": parts[9],
"longitude": parts[10],
"accuracy": parts[11].strip()
}
address_col.insert(address)
else:
log.error("Element has to few parts to parse")
return "Done importing all data" | 8f80343c60000a8ab988c02bac54e2f748e346b9 | 10,659 |
import async_timeout
import requests
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Enphase Envoy sensor."""
ip_address = config[CONF_IP_ADDRESS]
monitored_conditions = config[CONF_MONITORED_CONDITIONS]
name = config[CONF_NAME]
username = config[CONF_USERNAME]
password = config[CONF_PASSWORD]
_LOGGER.info("Envoy async_setup_platform called")
f = EnvoyReaderFactory(host=ip_address, username=username, password=password)
# The factory will return a reader based on the SW/FW version found in info.xml
envoy_reader = await f.get_reader()
entities = []
async def async_update_data():
try:
async with async_timeout.timeout(10):
return await envoy_reader.get_data()
except requests.exceptions.HTTPError as err:
raise UpdateFailed(f"Error communicating with API: {err}")
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="EnphaseEnvoy",
update_method=async_update_data,
update_interval= timedelta(seconds=30),
)
# Do an initial data collection so the list with inverters is filled
await coordinator.async_refresh()
# Iterate through the list of sensors configured
for condition in monitored_conditions:
if condition == "inverters":
# The initial data collection made sure we know all inverters that are available at this point
for inverter in coordinator.data['inverters']:
entities.append(
EnvoyInverter(
coordinator,
inverter['serial_number'],
envoy_reader,
condition,
f"{name}{SENSORS[condition][0]} {inverter['serial_number']}",
SENSORS[condition][1],
SENSORS[condition][2],
SENSORS[condition][3]
)
)
else:
entities.append(
Envoy(
coordinator,
coordinator.data['serial_number'],
envoy_reader,
condition,
f"{name}{SENSORS[condition][0]}",
SENSORS[condition][1],
SENSORS[condition][2],
SENSORS[condition][3]
)
)
async_add_entities(entities) | 07e762a8fbcc987b57d38bc8a10d3f51e6fa58a4 | 10,660 |
from .register import PIPING_SIGNS
from .verb import Verb
import ast
def _get_piping_verb_node(calling_node: ast.Call) -> ast.Call:
"""Get the ast node that is ensured the piping verb call
Args:
calling_node: Current Call node
Returns:
The verb call node if found, otherwise None
"""
# check if we have the piping node (i.e. >>)
child = calling_node
parent = getattr(child, "parent", None)
token = PIPING_SIGNS[Verb.CURRENT_SIGN].token
while parent:
if (
# data >> verb(...)
(isinstance(parent, ast.BinOp) and parent.right is child)
or
# data >>= verb(...)
(isinstance(parent, ast.AugAssign) and parent.value is child)
) and isinstance(parent.op, token):
return child
child = parent
parent = getattr(parent, "parent", None)
return None | 2f6be9b382f2bf2e31d39ff9682f5b26618aa1af | 10,661 |
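The walk above assumes every node carries a `parent` attribute, which the standard `ast` module does not set by itself; a minimal sketch of how such links could be attached beforehand (the `attach_parents` helper is illustrative, not part of pipda):
import ast

def attach_parents(tree: ast.AST) -> ast.AST:
    # Give every child node a .parent link so upward walks like the one above work.
    for parent_node in ast.walk(tree):
        for child in ast.iter_child_nodes(parent_node):
            child.parent = parent_node
    return tree

tree = attach_parents(ast.parse("result = data >> verb(1, 2)"))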
def slot(**kwargs):
"""Creates a SlotConfig instance based on the arguments.
Args:
**kwargs: Expects the following keyed arguments.
in_dist: Distribution for inbound in msec. Optional
in_max_bytes: Optional. Ignored when in_dist is missing.
in_max_pkts: Optional. Ignored when in_dist is missing.
out_dist: Distribution for outbound in msec. Optional
At least one of in_dist and out_dist must be available.
out_max_bytes: Optional. Ignored when out_dist is missing.
out_max_pkts: Optional. Ignored when out_dist is missing.
Returns:
The SlotConfig instance.
Raises:
ValueError: When both in_dist and out_dist are missing.
When an unexpected key is passed.
"""
expected_keys = {'in_dist', 'in_max_bytes', 'in_max_pkts', 'out_dist',
'out_max_bytes', 'out_max_pkts'}
if any(set(kwargs) - expected_keys):
raise ValueError('unexpected args: %s' %
','.join(set(kwargs) - expected_keys))
in_slot = None
out_slot = None
if 'in_dist' in kwargs:
in_slot = Slot(
kwargs['in_dist'],
kwargs['in_max_bytes'] if 'in_max_bytes' in kwargs else 0,
kwargs['in_max_pkts'] if 'in_max_pkts' in kwargs else 0)
if 'out_dist' in kwargs:
out_slot = Slot(
kwargs['out_dist'],
kwargs['out_max_bytes'] if 'out_max_bytes' in kwargs else 0,
kwargs['out_max_pkts'] if 'out_max_pkts' in kwargs else 0)
if not bool(in_slot or out_slot):
raise ValueError('in_dist or out_dist must be defined')
return SlotConfig(in_slot, out_slot) | 4b26f7a805b88a7a6bd03f7d23db7a14d7979eeb | 10,662 |
def get_agent_type(player):
""" Prompts user for info as to the type of agent to be created """
print('There are two kinds of Agents you can initialise.')
print(' 1 - <Human> - This would be a totally manually operated agent.')
print(' You are playing the game yourself.')
print(' 2 - <Random> - This is an agent who simply makes totally random moves.')
print(' They select from the set of all legal moves.')
# print(' 3 - <Engine> - This is an agent which selects moves on the basis of some')
# print(' pre-programmed algorithm.')
print(f'\nWhich type of agent should {player} be?')
while True:
result = input(' : ')
if result.isalpha(): # check response is all letters
result = result.lower() # make them all lowercase
if result.lower() == 'human':
agent_type = result.capitalize()
break
elif result.lower() == 'random':
agent_type = result.capitalize()
break
# elif result.lower() == 'engine':
# not_implemented('Engine')
# continue
elif result.lower() in ('close', 'quit', 'exit', 'no'):
exit_program()
elif result.isnumeric():
if result == '1':
agent_type = 'Human'
break
elif result == '2':
agent_type = 'Random'
break
# elif result == '3':
# not_implemented('Engine')
# continue
agent_name = player
print(f'And their name? Typing nothing will use the default name: {player}')
while True:
result = input(' : ')
if result == '':
break
elif result.isalnum():
if result.lower() in ('close', 'quit', 'exit', 'no'):
exit_program()
agent_name = result
break
else:
print('\n Can only include letters or numbers.\n')
return agent_type, agent_name | 3d0fce9faafaa6c993cb2b5b54a1480268c22ab3 | 10,663 |
def _try_match_and_transform_pattern_1(reduce_op, block) -> bool:
"""
Identify the pattern:
y = gamma * (x - mean) / sqrt(variance + epsilon) + beta
y = x * [gamma * rsqrt(variance + eps)] + (beta - mean * [gamma * rsqrt(variance + eps)])
x --> reduce_mean --> sub --> square --> reduce_mean --> add(epsilon) --> rsqrt
| | ^ |
| | | V
|----------------------- mul (gamma)
| | |
| | --------|---------
| | | |
| | | V
| |----------------------------------------------------------------> mul
| | |
| V |
|--------------------------------------------------------------> mul |
| V
| sub (beta) --> add --> [...]
| ^
|-------------------------------
This pattern corresponds to either layer_norm or instance_norm.
It is instance_norm if all of the following are true:
- input is rank 4
- axes of reduce_mean is [-2, -1] or [-3, -2]
(when [-3, -2], a channel first to channel last transpose would be inserted)
- gamma and beta are rank 1, after squeeze
It is layer_norm if all of the following are true:
- axes is either [-1] or [-1, -2] or [-1, -2, -3] and so on
- rank of gamma and beta is equal to the length of the axes
"""
ops_to_remove = []
root_var = reduce_op.x
if root_var.shape is None:
return False
# check that root_var feeds into exactly 3 ops
if len(list(root_var.child_ops)) != 3:
return False
if root_var.op is not None and not _check_child_op_types(
root_var.op, child_op_types=["reduce_mean", "sub", "mul"]
):
return False
# check 1st reduce_mean op
if not _check_reduce_op(reduce_op):
return False
ops_to_remove.append(reduce_op)
# check 1st sub op
if not _check_child_op_types(reduce_op, ["sub", "mul"], check_order=False):
return False
child_ops_reduce_mean = list(reduce_op.outputs[0].child_ops)
op_a = child_ops_reduce_mean[0]
op_b = child_ops_reduce_mean[1]
sub_op1 = op_a if op_a.op_type == "sub" else op_b
if not (sub_op1.x == root_var and sub_op1.y == reduce_op.outputs[0]):
return False
ops_to_remove.append(sub_op1)
# check square op
square_op = _try_get_child_op_type(sub_op1, "square")
if square_op is None:
return False
ops_to_remove.append(square_op)
# check second reduce mean
reduce_op2 = _try_get_child_op_type(square_op, "reduce_mean")
if not _check_reduce_op(reduce_op2):
return False
ops_to_remove.append(reduce_op2)
# check add op (with epsilon)
add_op1 = _try_get_child_op_type(reduce_op2, "add")
if add_op1 is None:
return False
epsilon_var = add_op1.y if add_op1.x == reduce_op2.outputs[0] else add_op1.x
if epsilon_var.val is None or len(epsilon_var.val.shape) != 0:
return False # must be scalar
ops_to_remove.append(add_op1)
# check rsqrt
rsqrt_op = _try_get_child_op_type(add_op1, "rsqrt")
if rsqrt_op is None:
return False
ops_to_remove.append(rsqrt_op)
# check mul (gamma)
mul_op1 = _try_get_child_op_type(rsqrt_op, "mul")
if mul_op1 is None:
return False
gamma_var = mul_op1.y if mul_op1.x == rsqrt_op.outputs[0] else mul_op1.x
if gamma_var.val is None:
return False
ops_to_remove.append(mul_op1)
# check 2 muls after the gamma mul
if not _check_child_op_types(mul_op1, ["mul", "mul"]):
return False
child_ops = list(mul_op1.outputs[0].child_ops)
mul_op2 = child_ops[0]
mul_op3 = child_ops[1]
mul_op2_other_var = mul_op2.x if mul_op2.y == mul_op1.outputs[0] else mul_op2.y
mul_op3_other_var = mul_op3.x if mul_op3.y == mul_op1.outputs[0] else mul_op3.y
if not (
(mul_op2_other_var == root_var and mul_op3_other_var == reduce_op.outputs[0])
or (mul_op2_other_var == reduce_op.outputs[0] and mul_op3_other_var == root_var)
):
return False
if mul_op2_other_var == root_var:
mul_root_op = mul_op2
mul_mean_op = mul_op3
else:
mul_root_op = mul_op3
mul_mean_op = mul_op2
ops_to_remove.append(mul_mean_op)
ops_to_remove.append(mul_root_op)
# check sub with beta
sub_op2 = _try_get_child_op_type(mul_mean_op, "sub")
if sub_op2 is None:
return False
if sub_op2.y != mul_mean_op.outputs[0]:
return False
beta_var = sub_op2.x
if beta_var.val is None:
return False
ops_to_remove.append(sub_op2)
# check last add op
add_op2 = _try_get_child_op_type(sub_op2, "add")
if add_op2 is None:
return False
if not (add_op2.x == mul_root_op.outputs[0] or add_op2.y == mul_root_op.outputs[0]):
return False
ops_to_remove.append(add_op2)
return _try_apply_transform(
reduce_op, block, gamma_var, beta_var, epsilon_var, add_op2, ops_to_remove
) | f1baecfc53daf731c5b518aadcc11c88508258e3 | 10,664 |
import click
def cli_resize(maxsize):
"""Resize images to a maximum side length preserving aspect ratio."""
click.echo("Initializing resize with parameters {}".format(locals()))
def _resize(images):
for info, image in images:
yield info, resize(image, maxsize)
return _resize | f0695940531c45a88ff1722c002dacc6103962e0 | 10,665 |
import numpy
def _fetch_object_array(cursor):
"""
_fetch_object_array() fetches arrays with a basetype that is not considered
scalar.
"""
arrayShape = cursor_get_array_dim(cursor)
# handle a rank-0 array by converting it to
# a 1-dimensional array of size 1.
if len(arrayShape) == 0:
arrayShape.append(1)
# now create the (empty) array of the correct type and shape
array = numpy.empty(dtype=object,shape=arrayShape)
# goto the first element
cursor_goto_first_array_element(cursor)
# loop over all elements excluding the last one
arraySizeMinOne = array.size - 1
for i in range(arraySizeMinOne):
array.flat[i] = _fetch_subtree(cursor)
cursor_goto_next_array_element(cursor)
    # final element, then go back to the parent scope
array.flat[arraySizeMinOne] = _fetch_subtree(cursor)
cursor_goto_parent(cursor)
return array | 7c84306c0b84a126f401e51bac5896203357380a | 10,666 |
def sldParse(sld_str):
"""
Builds a dictionary from an SldStyle string.
"""
sld_str = sld_str.replace("'", '"').replace('\"', '"')
keys = ['color', 'label', 'quantity', 'opacity']
items = [el.strip() for el in sld_str.split('ColorMapEntry') if '<RasterSymbolizer>' not in el]
sld_items = []
for i in items:
tmp = {}
for k in keys:
v = find_between(i, f'{k}="', '"')
if v: tmp[k] = v
sld_items.append(tmp)
return {
'type': find_between(sld_str, 'type="', '"'),
'extended': find_between(sld_str, 'extended="', '"'),
'items': sld_items
} | 888a2ee3251a0d1149b478d32ccb88ff0e309ec3 | 10,667 |
def x_ideal(omega, phase):
"""
Generates a complex-exponential signal with given frequency
and phase. Does not contain noise
"""
x = np.empty(cfg.N, dtype=np.complex_)
for n in range(cfg.N):
z = 1j*(omega * (cfg.n0+n) * cfg.Ts + phase)
x[n] = cfg.A * np.exp(z)
return x | 87e4df7cbbfe698e5deb461642de72efb6bfffad | 10,668 |
def _wrap_stdout(outfp):
"""
Wrap a filehandle into a C function to be used as `stdout` or
`stderr` callback for ``set_stdio``. The filehandle has to support the
write() and flush() methods.
"""
def _wrap(instance, str, count):
outfp.write(str[:count])
outfp.flush()
return count
return c_stdstream_call_t(_wrap) | f7d773890b17b18855d2d766bd147c67ac7ade3b | 10,669 |
def svn_fs_apply_textdelta(*args):
"""
svn_fs_apply_textdelta(svn_fs_root_t root, char path, char base_checksum,
char result_checksum, apr_pool_t pool) -> svn_error_t
"""
return _fs.svn_fs_apply_textdelta(*args) | d8d228415d8768ec297415a42113e0eb2463163f | 10,670 |
def find(x):
"""
Find the representative of a node
"""
if x.instance is None:
return x
else:
# collapse the path and return the root
x.instance = find(x.instance)
return x.instance | 5143e9d282fb1988d22273996dae36ed587bd9d2 | 10,671 |
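A minimal usage sketch of the path compression above, assuming `find` is in scope and using an illustrative node type whose `instance` field points at another node or is None:
class Node:
    def __init__(self, name):
        self.name = name
        self.instance = None  # None means this node is its own representative

a, b, c = Node("a"), Node("b"), Node("c")
c.instance = b
b.instance = a
print(find(c).name)     # "a"
print(c.instance.name)  # "a" -- the chain was collapsed directly onto the root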
def convert_shape(node, **kwargs):
"""Map MXNet's shape_array operator attributes to onnx's Shape operator
and return the created node.
"""
return create_basic_op_node('Shape', node, kwargs) | 7d4414eac78208b0c35d7ab5a9f21ab70a0947ae | 10,672 |
import logging
import os
def _UploadScreenShotToCloudStorage(fh):
""" Upload the given screenshot image to cloud storage and return the
cloud storage url if successful.
"""
try:
return cloud_storage.Insert(cloud_storage.TELEMETRY_OUTPUT,
_GenerateRemotePath(fh), fh.GetAbsPath())
except cloud_storage.CloudStorageError as err:
logging.error('Cloud storage error while trying to upload screenshot: %s',
repr(err))
return '<Missing link>'
finally: # Must clean up screenshot file if exists.
os.remove(fh.GetAbsPath()) | 0f77dc93f0cd708e36e157d9aee664c20536b486 | 10,673 |
import time
def get_timestamp(prev_ts=None):
"""Internal helper to return a unique TimeStamp instance.
If the optional argument is not None, it must be a TimeStamp; the
    return value is then guaranteed to be at least 1 microsecond later
    than the argument.
"""
t = time.time()
t = TimeStamp(*time.gmtime(t)[:5] + (t % 60,))
if prev_ts is not None:
t = t.laterThan(prev_ts)
return t | 89751c53679f11efd26b88609887c4a2ed475418 | 10,674 |
import lxml.etree
def get_element_as_string(element):
    """
    Serialize an lxml etree element into a pretty-printed string.
    :param element: the etree element to serialize
    :return: the pretty-printed XML string
"""
return lxml.etree.tostring(element, pretty_print=True).decode() | f62945ff4bdd3bea2562ba52a89d8d01c74e0b10 | 10,675 |
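A quick usage sketch, assuming `get_element_as_string` above is in scope and lxml is installed:
import lxml.etree

root = lxml.etree.fromstring("<root><child>hi</child></root>")
print(get_element_as_string(root))
# <root>
#   <child>hi</child>
# </root>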
import sys
import glob
import os
def get(dirPath):
"""指定したパスのファイル一覧を取得する"""
if sys.version_info.major != 3:
print("Error!!\nPython 3.x is required.")
exit()
if sys.version_info.minor >= 5:
        # Python 3.5 or later
fileList = []
fileList = glob.glob(dirPath, recursive=True)
return fileList
else:
        # Python 3.4 or earlier
fileList = []
for root, dirs, files in os.walk(dirPath):
for filename in files:
                fileList.append(os.path.join(root, filename)) # if only files are needed recursively, stopping here is enough
for dirname in dirs:
                fileList.append(os.path.join(root, dirname)) # also include this if subdirectories should be added to the list
print(fileList)
return fileList | a9b66504f1103f094930386a75afbcb8847dacbd | 10,676 |
def _select_ports(count, lower_port, upper_port):
"""Select and return n random ports that are available and adhere to the given port range, if applicable."""
ports = []
sockets = []
for i in range(count):
sock = _select_socket(lower_port, upper_port)
ports.append(sock.getsockname()[1])
sockets.append(sock)
for sock in sockets:
sock.close()
return ports | 2f92cb7e4ab26c54bc799369cd950c4269049291 | 10,677 |
def is_solution_quad(var, coeff, u, v):
"""
Check whether `(u, v)` is solution to the quadratic binary diophantine
equation with the variable list ``var`` and coefficient dictionary
``coeff``.
Not intended for use by normal users.
"""
reps = dict(zip(var, (u, v)))
eq = Add(*[j*i.xreplace(reps) for i, j in coeff.items()])
return _mexpand(eq) == 0 | b19d7678c725a41df755352f5af1ce322f3efad7 | 10,678 |
def is_callable(x):
"""Tests if something is callable"""
return callable(x) | 72584deb62ac5e34e69325466236792c5299a51b | 10,679 |
def validate_version_argument(version, hint=4):
""" validate the version argument against the supported MDF versions. The
default version used depends on the hint MDF major revision
Parameters
----------
version : str
requested MDF version
hint : int
MDF revision hint
Returns
-------
valid_version : str
valid version
"""
if version not in SUPPORTED_VERSIONS:
if hint == 2:
valid_version = "2.14"
elif hint == 3:
valid_version = "3.30"
else:
valid_version = "4.10"
message = (
'Unknown mdf version "{}".'
" The available versions are {};"
' automatically using version "{}"'
)
message = message.format(version, SUPPORTED_VERSIONS, valid_version)
logger.warning(message)
else:
valid_version = version
return valid_version | e29342f78236f043b079cf5e4473f6dccb29d35c | 10,680 |
import torch
def roc_auc(probs, labels):
"""
    Computes the area under the receiver operating characteristic curve between output probs
and labels for k classes.
Source: https://github.com/HazyResearch/metal/blob/master/metal/utils.py
args:
probs (tensor) (size, k)
labels (tensor) (size, 1)
"""
probs = torch.nn.functional.softmax(probs, dim=1)
probs = probs.numpy()
# Convert labels to one-hot indicator format, using the k inferred from probs
labels = hard_to_soft(labels, k=probs.shape[1]).numpy()
return skl.roc_auc_score(labels, probs) | 9ae79a4ff5cbf93d2187857c8ac62014c6fa98f0 | 10,681 |
from typing import Collection
def show_collection(request, collection_id):
"""Shows a collection"""
collection = get_object_or_404(Collection, pk=collection_id)
# New attribute to store the list of problems and include the number of submission in each problem
collection.problem_list = collection.problems()
for problem in collection.problem_list:
problem.num_submissions = problem.num_submissions_by_user(request.user)
problem.solved = problem.solved_by_user(request.user)
return render(request, 'collection.html', {'collection': collection}) | a23efc449258839a7d7bfa0c0a73d889a6891a0f | 10,682 |
from typing import Union
from typing import Callable
from typing import List
import numpy as np
def alpha(
data: np.ndarray,
delta: Union[Callable[[int, int], float], List[List[float]], str] = "nominal",
):
"""Calculates Krippendorff's alpha coefficient [1, sec. 11.3] for
inter-rater agreement.
[1] K. Krippendorff, Content analysis: An introduction to its
methodology. Sage publications, 2004.
Args:
-----
data: numpy.ndarray
The data matrix, shape (n_raters, n_units). Each cell (i, j)
represents the value assigned to unit j by rater i, or 0
representing no response.
delta: callable, 2-D array-like or str
The delta metric. Default is the nominal metric, which takes the
value 1 in case c != k and 0 otherwise.
"""
# The following implementation was based off the Wikipedia article:
# https://en.wikipedia.org/wiki/Krippendorff%27s_alpha
# Response categories go from 1 to R, 0 represents no response
R = np.max(data)
counts = np.apply_along_axis(lambda x: np.bincount(x, minlength=R + 1), 0, data).T
count_sum = np.sum(counts, 0)
assert len(count_sum) == R + 1
def ordinal(c: int, k: int):
if k < c:
c, k = k, c
s = (
sum(count_sum[g] for g in range(c, k + 1))
- (count_sum[c] + count_sum[k]) / 2
)
return s ** 2
if isinstance(delta, str):
delta = {
"nominal": Deltas.nominal,
"ordinal": ordinal,
"interval": Deltas.interval,
}[delta]
if not callable(delta):
try:
delta[0][0]
except IndexError:
raise TypeError("delta must be either str, callable or 2D array.")
def _delta(c, k):
new_delta = delta
return new_delta[c][k]
delta = _delta
m_u = np.sum(counts[:, 1:], 1)
valid = m_u >= 2
counts = counts[valid]
m_u = m_u[valid]
data = data[:, valid]
n = np.sum(m_u)
n_cku = np.matmul(counts[:, :, None], counts[:, None, :])
for i in range(R + 1):
n_cku[:, i, i] = counts[:, i] * (counts[:, i] - 1)
D_o = 0
for c in range(1, R + 1):
for k in range(1, R + 1):
D_o += delta(c, k) * n_cku[:, c, k]
D_o = np.sum(D_o / (n * (m_u - 1)))
D_e = 0
P_ck = np.bincount(data.flat)
for c in range(1, R + 1):
for k in range(1, R + 1):
D_e += delta(c, k) * P_ck[c] * P_ck[k]
D_e /= n * (n - 1)
return 1 - D_o / D_e | 98c86120287d9d4b2c7f10ad074702c2088ade8d | 10,683 |
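A small sanity check, assuming `alpha` above is importable; the delta matrix reproduces the nominal metric (so the undefined `Deltas` helper is not needed), and perfect agreement between the two raters should give a coefficient of 1.0:
import numpy as np

ratings = np.array([[1, 2, 1, 2],   # rater 1
                    [1, 2, 1, 2]])  # rater 2, identical ratings -> perfect agreement
# delta[c][k] = 0 if c == k else 1, indexed by category (0 = no response)
nominal_delta = [[0, 0, 0],
                 [0, 0, 1],
                 [0, 1, 0]]
print(alpha(ratings, delta=nominal_delta))  # 1.0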
def enforce(action, target, creds, do_raise=True):
"""Verifies that the action is valid on the target in this context.
:param creds: user credentials
:param action: string representing the action to be checked, which
should be colon separated for clarity.
Or it can be a Check instance.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary
representing the location of the object e.g.
{'project_id': object.project_id}
:param rule_dict: instance of oslo_policy.policy.Rules, it's
                      actually a dict whose keys are the actions
                      to be protected and whose values are parsed Check trees.
:raises: `exception.Forbidden` if verification fails.
Actions should be colon separated for clarity. For example:
* identity:list_users
"""
init()
# Add the exception arguments if asked to do a raise
extra = {}
if do_raise:
extra.update(exc=exception.ForbiddenAction, service=action[0],
permission=action[1], do_raise=do_raise)
return _ENFORCER.enforce(action, target, creds, **extra) | 16cdefe38bfc56f529a735b8517d94ade7db780d | 10,684 |
from operator import and_
from operator import or_
def query_data(session, agency_code, period, year):
""" Request A file data
Args:
session: DB session
agency_code: FREC or CGAC code for generation
period: The period for which to get GTAS data
year: The year for which to get GTAS data
Returns:
The rows using the provided dates for the given agency.
"""
# set a boolean to determine if the original agency code is frec or cgac
frec_provided = len(agency_code) == 4
tas_gtas = tas_gtas_combo(session, period, year)
# Make a list of FRECs to compare to for 011 AID entries
frec_list = []
if not frec_provided:
frec_list = session.query(FREC.frec_code).select_from(outerjoin(CGAC, FREC, CGAC.cgac_id == FREC.cgac_id)).\
filter(CGAC.cgac_code == agency_code).all()
# Group agencies together that need to be grouped
agency_array = []
if agency_code == '097':
agency_array = ['017', '021', '057', '097']
elif agency_code == '1601':
agency_array = ['1601', '016']
elif agency_code == '1125':
agency_array = ['1125', '011']
# Save the ATA filter
agency_filters = []
if not agency_array:
agency_filters.append(tas_gtas.c.allocation_transfer_agency == agency_code)
else:
agency_filters.append(tas_gtas.c.allocation_transfer_agency.in_(agency_array))
# Save the AID filter
if agency_code == '097' and not frec_provided:
agency_filters.append(and_(tas_gtas.c.allocation_transfer_agency.is_(None),
tas_gtas.c.agency_identifier.in_(agency_array)))
elif not frec_provided:
agency_filters.append(and_(tas_gtas.c.allocation_transfer_agency.is_(None),
tas_gtas.c.agency_identifier == agency_code))
else:
agency_filters.append(and_(tas_gtas.c.allocation_transfer_agency.is_(None),
tas_gtas.c.fr_entity_type == agency_code))
# If we're checking a CGAC, we want to filter on all of the related FRECs for AID 011, otherwise just filter on
# that FREC
if frec_list:
agency_filters.append(and_(tas_gtas.c.allocation_transfer_agency.is_(None),
tas_gtas.c.agency_identifier == '011',
tas_gtas.c.fr_entity_type.in_(frec_list)))
elif not frec_provided:
agency_filters.append(and_(tas_gtas.c.allocation_transfer_agency.is_(None),
tas_gtas.c.agency_identifier == '011',
tas_gtas.c.fr_entity_type == agency_code))
rows = initial_query(session, tas_gtas.c).\
filter(func.coalesce(tas_gtas.c.financial_indicator2, '') != 'F').\
filter(or_(*agency_filters)).\
group_by(tas_gtas.c.allocation_transfer_agency,
tas_gtas.c.agency_identifier,
tas_gtas.c.beginning_period_of_availa,
tas_gtas.c.ending_period_of_availabil,
tas_gtas.c.availability_type_code,
tas_gtas.c.main_account_code,
tas_gtas.c.sub_account_code)
return rows | 0eb856f699eebf95bf10ff2d3dd6c9a72ec0843a | 10,685 |
def vocabulary(vec_docs):
    """ vocabulary(vec_docs) -> tuple: (float avg_doc_len, corpus vocabulary dictionary {"word": num_docs_containing_this_term, ...})
    vec_docs = dict of documents {ID: ["word_i", "word_i+1", ...], ...}; its values are updated in place to per-document term counts c(w,d)
"""
vocabulary = {}
count_vec = [] #used for aggregating doc lengths in a list to determining avg_doc_len
#Extract len of docs anonymously, convert vec_docs values to c(w,d), Create corups Vocabulary as c(d,w)
for key,value in vec_docs.items(): #recall: {key = "doc_ID": value = [list, of, words, in, each, document]}
doc_words = {}
count_vec.append(len(value))
for word in value:
#convert doc word list into dict storing c(w,d) ∈ D
if word in doc_words:
doc_words[word] = doc_words[word] + 1
else:
doc_words[word] = 1
        # Next, create vocabulary c(d,w) ∈ Corpus
for word,count in doc_words.items():
if word in vocabulary:
vocabulary[word] = vocabulary[word] + 1
else:
vocabulary[word] = 1
#last convert {ID:[list,of,words]} -> {ID: {dict:1,of:1,word:1,counts:2} }
vec_docs[key] = doc_words
avg_dl = sum(count_vec) / len(count_vec)
return (avg_dl,vocabulary) | 4e6f4df1e36c2fdf3d7d1d20750d74f91a0214b6 | 10,686 |
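A small usage sketch, assuming `vocabulary` above is in scope; note that the input dict's values are rewritten in place to per-document term counts:
docs = {"d1": ["the", "cat", "sat"], "d2": ["the", "dog"]}
avg_dl, vocab = vocabulary(docs)
print(avg_dl)      # 2.5
print(vocab)       # {'the': 2, 'cat': 1, 'sat': 1, 'dog': 1}
print(docs["d1"])  # {'the': 1, 'cat': 1, 'sat': 1}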
import os
import traceback
def main(unused_argv):
"""Main entry.
Args:
* unused_argv: unused arguments (after FLAGS is parsed)
"""
try:
# setup the TF logging routine
tf.logging.set_verbosity(tf.logging.INFO)
# set the learning phase to 'inference'; data format may be changed if needed
set_learning_phase()
# inspect the model
meta_path = os.path.join(FLAGS.model_dir_dst, 'model.ckpt.meta')
inspect_model(meta_path)
# exit normally
return 0
except ValueError:
traceback.print_exc()
return 1 # exit with errors | 7f1af5f6055629b9c80ff9144713ca146ce9b9ed | 10,687 |
import warnings
def _build_type(type_, value, property_path=None):
""" Builds the schema definition based on the given type for the given value.
:param type_: The type of the value
:param value: The value to build the schema definition for
:param List[str] property_path: The property path of the current type,
defaults to None, optional
:return: The built schema definition
:rtype: Dict[str, Any]
"""
if not property_path:
property_path = []
for (type_check, builder) in (
(is_enum_type, _build_enum_type),
(is_null_type, _build_null_type),
(is_bool_type, _build_bool_type),
(is_string_type, _build_string_type),
(is_integer_type, _build_integer_type),
(is_number_type, _build_number_type),
(is_array_type, _build_array_type),
(is_object_type, _build_object_type),
):
if type_check(type_):
return builder(value, property_path=property_path)
# NOTE: warning ignores type None (as that is the config var default)
if type_:
warnings.warn(f"unhandled translation for type {type_!r} with value {value!r}")
return {} | 7033e5f8bc5cd5b667f25b8554bfe3296191762f | 10,688 |
import numpy as np
def lidar_2darray_to_rgb(array: np.ndarray) -> np.ndarray:
    """Returns a `NumPy` array (image) from a 2 channel LIDAR point cloud.
Args:
array: The original LIDAR point cloud array.
Returns:
The `PyGame`-friendly image to be visualized.
"""
# Get array shapes.
W, H, C = array.shape
assert C == 2
# Select channel.
img = np.c_[array, np.zeros(shape=(W, H, 1))]
# Convert to 8-bit image.
img = 255 * (img / img.max())
return img | 69e2de793b9280b269ac8ab9f3d313e51c932c8c | 10,689 |
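A quick shape check, assuming the function above is in scope; the random grid below is purely illustrative:
import numpy as np

cloud = np.random.rand(64, 64, 2)  # W x H x 2 LIDAR grid
img = lidar_2darray_to_rgb(cloud)
print(img.shape, img.max())        # (64, 64, 3) 255.0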
import argparse
def args():
"""
--all (some subset that is useful for someone)
--packages (maybe positional?)
"""
parser = argparse.ArgumentParser("serviced-tests")
parser.add_argument("-v", "--verbose", action="store_true", help="verbose logging")
types = parser.add_argument_group("Test Type")
types.add_argument("--unit", action="store_true", help="pass the 'unit' build tag")
types.add_argument("--integration", action="store_true", help="pass the 'integration' build tag")
options = parser.add_argument_group("Test Options")
options.add_argument("--quick", action="store_true", help="don't run tests with the '!quick' build constraint")
options.add_argument("--root", action="store_true", help="run the tests as the root user")
options.add_argument("--race", action="store_true", help="run tests with race detection")
options.add_argument("--cover", action="store_true", help="run tests with coverage")
options.add_argument("--tag", action="append", help="optional extra build tag (may be specified multiple times)")
options.add_argument("--include_vendor", action="store_true", dest="include_vendor", help="run tests against the vendor directory")
coverage = parser.add_argument_group("Coverage Options")
coverage.add_argument("--cover-html", required=False, help="output file for HTML coverage report")
coverage.add_argument("--cover-xml", required=False, help="output file for Cobertura coverage report")
fixtures = parser.add_argument_group("Fixture Options")
fixtures.add_argument("--elastic", action="store_true", help="start an elastic server before the test run")
fixtures.add_argument("--elastic-port", type=int, help="elastic server port", default=9202)
parser.add_argument("--packages", nargs="*", help="serviced packages to test, relative to the serviced root (defaults to ./...)")
parser.add_argument("arguments", nargs=argparse.REMAINDER, help="optional arguments to be passed through to the test runner")
return parser.parse_args() | b2a6b83b1ee02fc5ae2ba3130757ca50d9d954fe | 10,690 |
from typing import Tuple
from typing import Union
from typing import List
from typing import Dict
import tqdm
import torch
def rollout(dataset: RPDataset,
env: RPEnv,
policy: Policy,
batch_size: int,
num_workers: int = 4,
disable_progress_bar: bool = False,
**kwargs) -> Tuple[Tensor, Union[List, Dict]]:
"""Policy evaluation rollout
Args:
dataset: dataset to evaluate on
env: the routing simulation environment
policy: policy model
batch_size: size of mini-batches
num_workers: num cores to distribute data loading
disable_progress_bar: flag to disable tqdm progress bar
Returns:
        tensor of final costs per instance and a list of per-batch info objects
"""
costs, infos = [], []
for batch in tqdm(
DataLoader(
dataset,
batch_size=batch_size,
num_workers=num_workers,
collate_fn=lambda x: x, # identity -> returning simple list of instances
shuffle=False # do not random shuffle data in rollout!
),
disable=disable_progress_bar,
):
with torch.no_grad():
cost, info = eval_episode(batch, env, policy, **kwargs)
costs.append(cost.cpu())
infos.append(info)
env.clear_cache()
return torch.cat(costs, dim=0), infos | 1b88ff01b86d567de2c78d4450825e4fd1120311 | 10,691 |
def regex_ignore_case(term_values):
"""
turn items in list "term_values" to regexes with ignore case
"""
output=[]
for item in term_values:
output.append(r'(?i)'+item)
return output | 5dbf5fba758fe91fb0bbfaed6ab3cfa5f05357eb | 10,692 |
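A short usage sketch, assuming `regex_ignore_case` above is in scope:
import re

patterns = regex_ignore_case(["error", "timeout"])
print(patterns)                                          # ['(?i)error', '(?i)timeout']
print(bool(re.search(patterns[0], "ERROR: disk full")))  # True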
from typing import Callable
import numpy as np
def importance_sampling_integrator(function: Callable[..., np.ndarray],
pdf: Callable[..., np.ndarray],
sampler: Callable[..., int],
n: int = 10000,
seed: int = 1
) -> np.array:
"""
Parameters
----------
function : TYPE
DESCRIPTION.
pdf : TYPE
DESCRIPTION.
sampler : TYPE
DESCRIPTION.
n : TYPE, optional
DESCRIPTION. The default is 10000.
seed : TYPE, optional
DESCRIPTION. The default is 1.
Returns
-------
TYPE
DESCRIPTION.
"""
# Set a random seed.
np.random.seed(seed)
# Generate n samples from the probability distribution.
samples = sampler(n)
#ipdb.set_trace()
# Evaluate the function at the samples and divide by the probability
# density of the distribution at those samples.
sampled_values = function(samples) / pdf(samples)
# Add the estimate of the integral to the estimates list.
estimates = np.mean(sampled_values, axis=1) # Altered this for the batching.
# Return the mean of the estimates as the estimate of the integral.
return np.array(estimates) | 14b6abfaa38f37430ec0e9abcd330f1392543978 | 10,693 |
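A small sanity-check sketch, assuming the function above is importable; it estimates E[X^2] = 1 for X ~ N(0, 1) with N(0, 2) as the proposal, and the integrand returns a (1, n) array because the estimator averages over axis 1:
import numpy as np

def normal_pdf(x, mu, sigma):
    return np.exp(-0.5 * ((x - mu) / sigma) ** 2) / (sigma * np.sqrt(2.0 * np.pi))

integrand = lambda x: (x ** 2 * normal_pdf(x, 0.0, 1.0))[None, :]  # shape (1, n)
proposal_pdf = lambda x: normal_pdf(x, 0.0, 2.0)
proposal_sampler = lambda n: np.random.normal(0.0, 2.0, size=n)

print(importance_sampling_integrator(integrand, proposal_pdf, proposal_sampler, n=100000))
# approximately [1.0]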
from apex.amp._amp_state import _amp_state
import torch
def r1_gradient_penalty_loss(discriminator,
real_data,
mask=None,
norm_mode='pixel',
loss_scaler=None,
use_apex_amp=False):
"""Calculate R1 gradient penalty for WGAN-GP.
R1 regularizer comes from:
"Which Training Methods for GANs do actually Converge?" ICML'2018
Diffrent from original gradient penalty, this regularizer only penalized
gradient w.r.t. real data.
Args:
discriminator (nn.Module): Network for the discriminator.
real_data (Tensor): Real input data.
mask (Tensor): Masks for inpainting. Default: None.
norm_mode (str): This argument decides along which dimension the norm
of the gradients will be calculated. Currently, we support ["pixel"
, "HWC"]. Defaults to "pixel".
Returns:
Tensor: A tensor for gradient penalty.
"""
batch_size = real_data.shape[0]
real_data = real_data.clone().requires_grad_()
disc_pred = discriminator(real_data)
if loss_scaler:
disc_pred = loss_scaler.scale(disc_pred)
elif use_apex_amp:
_loss_scaler = _amp_state.loss_scalers[0]
disc_pred = _loss_scaler.loss_scale() * disc_pred.float()
gradients = autograd.grad(
outputs=disc_pred,
inputs=real_data,
grad_outputs=torch.ones_like(disc_pred),
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
if loss_scaler:
# unscale the gradient
inv_scale = 1. / loss_scaler.get_scale()
gradients = gradients * inv_scale
elif use_apex_amp:
inv_scale = 1. / _loss_scaler.loss_scale()
gradients = gradients * inv_scale
if mask is not None:
gradients = gradients * mask
if norm_mode == 'pixel':
gradients_penalty = ((gradients.norm(2, dim=1))**2).mean()
elif norm_mode == 'HWC':
gradients_penalty = gradients.pow(2).reshape(batch_size,
-1).sum(1).mean()
else:
raise NotImplementedError(
'Currently, we only support ["pixel", "HWC"] '
f'norm mode but got {norm_mode}.')
if mask is not None:
gradients_penalty /= torch.mean(mask)
return gradients_penalty | d142375d89f39ae2510575d0615a878bd67e9574 | 10,694 |
import json
import re
def visualize(args):
"""Return the visualized output"""
ret = ""
cmd_list = json.load(args.results)['cmd_list']
cmd_list = util.filter_cmd_list(cmd_list, args.labels_to_include, args.labels_to_exclude)
(cmd_list, label_map) = util.translate_dict(cmd_list, args.label_map)
for cmd in cmd_list:
values = []
if 'jobs' in cmd:
for job in cmd['jobs']:
if 'results' in job:
for res in job['results']:
match_list = re.findall(args.parse_regex, res['stdout'])
if res['success'] and len(match_list) > 0:
for match in match_list:
values.append(args.py_type(match))
else:
values.append("N/A")
succeed_values = util.extract_succeed_results(cmd, args.parse_regex, args.py_type)
mean = util.mean(succeed_values)
std = util.standard_deviation(succeed_values)
if args.csv:
sep = args.csv_separator
ret += "%s%s %.4f%s %.4f" % (label_map[cmd['label']], sep, mean, sep, std)
else:
ret += "%s: %s" % (label_map[cmd['label']], values)
if len(succeed_values) > 0:
ret += " %.4f" % mean
ret += " (%.4f)" % std
ret += "\n"
return ret | 8a9e655adfc9713785f96ab487d579a19d9d09b2 | 10,695 |
def send_request(apikey, key_root, data, endpoint):
"""Send a request to the akismet server and return the response."""
url = 'http://%s%s/%s/%s' % (
key_root and apikey + '.' or '',
AKISMET_URL_BASE,
AKISMET_VERSION,
endpoint
)
try:
response = open_url(url, data=url_encode(data))
except:
return
try:
return response.data.strip()
finally:
response.close() | aea5ac8eb0b8b91002e680376c6f2647a631e58c | 10,696 |
def request_set_bblk_trace_options(*args):
"""
request_set_bblk_trace_options(options)
Post a 'set_bblk_trace_options()' request.
@param options (C++: int)
"""
return _ida_dbg.request_set_bblk_trace_options(*args) | 728d7e2a7a0ef0085d4bb72763b2a019d89896ec | 10,697 |
def range_str(values: iter) -> str:
"""
Given a list of integers, returns a terse string expressing the unique values.
Example:
indices = [0, 1, 2, 3, 4, 7, 8, 11, 15, 20]
range_str(indices)
>> '0-4, 7-8, 11, 15 & 20'
:param values: An iterable of ints
:return: A string of unique value ranges
"""
trial_str = ''
values = list(set(values))
for i in range(len(values)):
if i == 0:
trial_str += str(values[i])
elif values[i] - (values[i - 1]) == 1:
if i == len(values) - 1 or values[i + 1] - values[i] > 1:
trial_str += f'-{values[i]}'
else:
trial_str += f', {values[i]}'
# Replace final comma with an ampersand
k = trial_str.rfind(',')
if k > -1:
trial_str = f'{trial_str[:k]} &{trial_str[k + 1:]}'
return trial_str | 85dedc97342b07dcb2a8dda753768309aa31ed43 | 10,698 |
import json
def analyze(request):
"""
    Analyze SQL using soar
:param request:
:return:
"""
text = request.POST.get('text')
instance_name = request.POST.get('instance_name')
db_name = request.POST.get('db_name')
if not text:
result = {"total": 0, "rows": []}
else:
soar = Soar()
if instance_name != '' and db_name != '':
soar_test_dsn = SysConfig().get('soar_test_dsn')
            # Get the instance connection info
instance_info = Instance.objects.get(instance_name=instance_name)
online_dsn = "{user}:{pwd}@{host}:{port}/{db}".format(user=instance_info.user,
pwd=instance_info.raw_password,
host=instance_info.host,
port=instance_info.port,
db=db_name)
else:
online_dsn = ''
soar_test_dsn = ''
args = {"report-type": "markdown",
"query": '',
"online-dsn": online_dsn,
"test-dsn": soar_test_dsn,
"allow-online-as-test": "false"}
rows = generate_sql(text)
for row in rows:
args['query'] = row['sql'].replace('"', '\\"').replace('`', '').replace('\n', ' ')
cmd_args = soar.generate_args2cmd(args=args, shell=True)
stdout, stderr = soar.execute_cmd(cmd_args, shell=True).communicate()
row['report'] = stdout if stdout else stderr
result = {"total": len(rows), "rows": rows}
return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
content_type='application/json') | a033774727242783357a35a6608d7154f77bc016 | 10,699 |