content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---
import sys
def alpha_161(code, end_date=None, fq="pre"):
"""
Formula:
MEAN(MAX(MAX((HIGH-LOW),ABS(DELAY(CLOSE,1)-HIGH)),ABS(DELAY(CLOSE,1)-LOW)),12)
Inputs:
code: stock pool (security code or list of codes)
end_date: query date
Outputs:
value of the factor
"""
end_date = to_date_str(end_date)
func_name = sys._getframe().f_code.co_name
return JQDataClient.instance().get_alpha_191(**locals()) | befa8b4d2c3148e8b0183873dd85fb225b90d753 | 15,200 |
def WeightedCrossEntropyLoss(alpha=0.5):
"""
Calculates the Weighted Cross-Entropy Loss, which applies a factor alpha, allowing one to
trade off recall and precision by up- or down-weighting the cost of a positive error relative
to a negative error.
A value alpha > 1 decreases the false negative count, hence increasing the recall.
Conversely, setting alpha < 1 decreases the false positive count and increases the precision.
"""
def _gradient(yhat, dtrain, alpha):
"""Compute the weighted cross-entropy gradient.
Args:
yhat (np.array): Margin predictions
dtrain: The XGBoost / LightGBM dataset
alpha (float): Scale applied
Returns:
grad: Weighted cross-entropy gradient
"""
y = dtrain.get_label()
yhat = clip_sigmoid(yhat)
grad = (y * yhat * (alpha - 1)) + yhat - (alpha * y)
return grad
def _hessian(yhat, dtrain, alpha):
"""Compute the weighted cross-entropy hessian.
Args:
yhat (np.array): Margin predictions
dtrain: The XGBoost / LightGBM dataset
alpha (float): Scale applied
Returns:
hess: Weighted cross-entropy Hessian
"""
y = dtrain.get_label()
yhat = clip_sigmoid(yhat)
hess = (y * (alpha - 1) + 1) * yhat * (1 - yhat)
return hess
def weighted_cross_entropy(
yhat,
dtrain,
alpha=alpha
):
"""
Calculate the gradient and Hessian for the weighted cross-entropy objective.
Args:
yhat (np.array): Predictions
dtrain: The XGBoost / LightGBM dataset
alpha (float): Scale applied
Returns:
grad: Weighted cross-entropy gradient
hess: Weighted cross-entropy Hessian
"""
grad = _gradient(yhat, dtrain, alpha=alpha)
hess = _hessian(yhat, dtrain, alpha=alpha)
return grad, hess
return weighted_cross_entropy | 5746bab38e39dd6f688ea648f29e7c30d7827466 | 15,201 |
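The factory above returns a callable with the (predictions, dataset) signature that gradient-boosting libraries expect for custom objectives. Below is a minimal, self-contained usage sketch: clip_sigmoid is not defined in this snippet, so the version shown is an assumed stand-in (a sigmoid clipped away from 0 and 1), and _FakeDataset is a hypothetical stub that mimics the get_label() accessor of an XGBoost/LightGBM dataset.
import numpy as np

def clip_sigmoid(x, eps=1e-7):
    # Assumed helper: numerically clipped sigmoid of the raw margins.
    return np.clip(1.0 / (1.0 + np.exp(-x)), eps, 1.0 - eps)

class _FakeDataset:
    # Hypothetical stub exposing the get_label() accessor used by the objective.
    def __init__(self, labels):
        self._labels = np.asarray(labels, dtype=float)

    def get_label(self):
        return self._labels

objective = WeightedCrossEntropyLoss(alpha=2.0)  # alpha > 1 up-weights false negatives
margins = np.array([-1.0, 0.5, 2.0])
grad, hess = objective(margins, _FakeDataset([0, 1, 1]))
print(grad, hess)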
def expand_stylesheet(abbr: str, config: Config):
"""
Expands given *stylesheet* abbreviation (a special Emmet abbreviation designed for
stylesheet languages like CSS, SASS etc.) and outputs it according to options
provided in config
"""
return stringify_stylesheet(stylesheet_abbreviation(abbr, config), config) | 17a65d1d6f6f2205a71e6e0ab653ef723672d756 | 15,202 |
def generate_legacy_dir(ctx, config, manifest, layers):
"""Generate a intermediate legacy directory from the image represented by the given layers and config to /image_runfiles.
Args:
ctx: the execution context
config: the image config file
manifest: the image manifest file
layers: the list of layer tarballs
Returns:
The filepaths generated and runfiles to be made available.
config: the generated config file.
layers: the generated layer tarball files.
temp_files: all the files generated to be made available at runtime.
"""
# Construct image runfiles for input to pusher.
image_files = [] + layers
if config:
image_files += [config]
if manifest:
image_files += [manifest]
path = "image_runfiles/"
layer_files = []
# Symlink layers to ./image_runfiles/<i>.tar.gz
for i in range(len(layers)):
layer_symlink = ctx.actions.declare_file(path + str(i) + ".tar.gz")
layer_files.append(layer_symlink)
ctx.actions.run_shell(
outputs = [layer_symlink],
inputs = [layers[i]],
command = "ln {src} {dst}".format(
src = layers[i].path,
dst = layer_symlink.path,
),
)
# Symlink config to ./image_runfiles/config.json
config_symlink = ctx.actions.declare_file(path + "config.json")
ctx.actions.run_shell(
outputs = [config_symlink],
inputs = [config],
command = "ln {src} {dst}".format(
src = config.path,
dst = config_symlink.path,
),
)
return {
"config": config_symlink,
"layers": layer_files,
"temp_files": [config_symlink] + layer_files,
} | 6001820e63ac3586625f7ca29311d717cc1e4c07 | 15,203 |
def workflow_key(workflow):
"""Return text search key for workflow"""
# I wish tags were in the manifest :(
elements = [workflow['name']]
elements.extend(workflow['tags'])
elements.extend(workflow['categories'])
elements.append(workflow['author'])
return ' '.join(elements) | 57347705b605e68a286dd953de5bb157ac50628e | 15,204 |
def get_logits(input_ids,mems,input_mask,target_mask):
"""Builds the graph for calculating the final logits"""
is_training = False
cutoffs = []
train_bin_sizes = []
eval_bin_sizes = []
proj_share_all_but_first = True
n_token = FLAGS.n_token
batch_size = FLAGS.batch_size
features = {"input": input_ids}
inp = tf.transpose(features["input"], [1, 0])
input_mask = tf.transpose(input_mask, [1, 0])
target_mask = tf.transpose(target_mask, [1, 0])
tgt = None
inp_perms, tgt_perms, head_tgt = None, None, None
if FLAGS.init == "uniform":
initializer = tf.initializers.random_uniform(
minval=-FLAGS.init_range,
maxval=FLAGS.init_range,
seed=None)
elif FLAGS.init == "normal":
initializer = tf.initializers.random_normal(
stddev=FLAGS.init_std,
seed=None)
proj_initializer = tf.initializers.random_normal(
stddev=FLAGS.proj_init_std,
seed=None)
tie_projs = [False for _ in range(len(cutoffs) + 1)]
if proj_share_all_but_first:
for i in range(1, len(tie_projs)):
tie_projs[i] = True
tf.logging.info("Vocab size : {}".format(n_token))
tf.logging.info("Batch size : {}".format(batch_size))
logits, new_mems = model.transformer(
dec_inp=inp,
target=tgt,
mems=mems,
n_token=n_token,
n_layer=FLAGS.n_layer,
d_model=FLAGS.d_model,
d_embed=FLAGS.d_embed,
n_head=FLAGS.n_head,
d_head=FLAGS.d_head,
d_inner=FLAGS.d_inner,
dropout=0,
dropatt=0,
initializer=initializer,
is_training=is_training,
mem_len=FLAGS.seq_len+FLAGS.max_decode_length,
cutoffs=cutoffs,
div_val=1,
tie_projs=tie_projs,
input_perms=inp_perms,
target_perms=tgt_perms,
head_target=head_tgt,
same_length=FLAGS.same_length,
clamp_len=FLAGS.clamp_len,
use_tpu=FLAGS.use_tpu,
untie_r=FLAGS.untie_r,
proj_same_dim=True,
bidirectional_mask=FLAGS.bi_mask,
infer=True,
target_mask=target_mask,
input_mask=input_mask,
tgt_len=1)
return logits,new_mems | 4719104fdbb693411a9614e8a4048cbf6b932d1f | 15,205 |
import os
def serve_protocols(environ, start_response):
"""Serve a list of all protocols.
"""
status = '200 OK'
response_headers = [('Content-type', 'text/html')]
start_response(status, response_headers)
repo = os.path.join(APP_ROOT, 'storage')
protocols = [f_name for f_name in os.listdir(repo) if f_name.endswith('.protocol')]
if not check_test_index():
add_ids_test_index()
doc = [PROTOCOL_LIST % write_protocols(protocols)]
return doc | 2c4ae7b64b5ab5c6f56acd3c8f283dc895b0f594 | 15,206 |
def _api_get_scripts(name, output, kwargs):
""" API: accepts output """
return report(output, keyword="scripts", data=list_scripts()) | 88f002646cdec6911a76aa16cec2939b32cffd33 | 15,207 |
import requests
from requests.exceptions import HTTPError
def get_children(key):
"""
Lists all direct child usages for a name usage
:return: list of species
"""
api_url = 'http://api.gbif.org/v1/species/{key}/children'.format(
key=key
)
try:
response = requests.get(api_url)
json_response = response.json()
if json_response['results']:
return json_response['results']
return None
except (HTTPError, KeyError) as e:
print(e)
return None | 8d4a4ca4c1231ca2c7d98f7c0cede5ecdac003d5 | 15,208 |
def _extend(obj, *args):
"""
adapted from underscore-py
Extend a given object with all the properties in
passed-in object(s).
"""
args = list(args)
for src in args:
obj.update(src)
for k, v in src.items():
if v is None:
del obj[k]
return obj | 9fe1bffcd05ac44a3587b53a71f592c462975482 | 15,209 |
from asgiref.sync import async_to_sync
import functools
def async_test(func):
"""
Wrap async_to_sync with another function because Pytest complains about
collecting the resulting callable object as a test because it's not a true
function:
PytestCollectionWarning: cannot collect 'test_foo' because it is not a
function.
"""
# inner import because for Python 3.6+ tests only
sync_func = async_to_sync(func)
@functools.wraps(func)
def wrapper(*args, **kwargs):
return sync_func(*args, **kwargs)
return wrapper | 10127bd083230404a7bb79d764502e6354f44b5a | 15,210 |
import logging
import logging.config
def get_logger(lname, logfile):
"""logging setup
logging config - to be moved to file at some point
"""
logger = logging.getLogger(lname)
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(levelname)s:\t%(message)s'
},
'verbose': {
'format': '%(levelname)s:\t%(message)s\tFROM: %(name)s'
}
},
'handlers': {
'stdout': {
'level': 'INFO',
'formatter': 'verbose',
'class': 'logging.StreamHandler'
},
'logfile': {
'level': 'INFO',
'formatter': 'standard',
'class': 'logging.FileHandler',
'filename': logfile
}
},
'loggers': {
'': {
'handlers': ['stdout', 'logfile'],
'level': 'INFO',
'propagate': True
}
}
})
return logger | 0a4795f383077b52b84afb882f090e0f9140fd0f | 15,211 |
import numpy as np
def get_percentiles(data, percentiles, integer_valued=True):
"""Returns a dict of percentiles of the data.
Args:
data: An unsorted list of datapoints.
percentiles: A list of ints or floats in the range [0, 100] representing the
percentiles to compute.
integer_valued: Whether or not the values are all integers. If so,
interpolate to the nearest datapoint (instead of computing a fractional
value between the two nearest datapoints).
Returns:
A dict mapping each element of percentiles to the computed result.
"""
# Ensure integer datapoints for cleaner binning if necessary.
interpolation = 'nearest' if integer_valued else 'linear'
results = np.percentile(data, percentiles, interpolation=interpolation)
return {percentiles[i]: results[i] for i in range(len(percentiles))} | 763c0c1a724b55ac4bb5b83a6831fa5aa44993fd | 15,212 |
import numpy as np
def estimate_poster_dedpul(diff, alpha=None, quantile=0.05, alpha_as_mean_poster=False, max_it=100, **kwargs):
"""
Estimates posteriors and priors alpha (if not provided) of N in U with dedpul method
:param diff: difference of densities f_p / f_u for the sample U, np.array (n,), output of estimate_diff()
:param alpha: priors, share of N in U (estimated if None)
:param quantile: if alpha is None, relaxation of the estimate of alpha;
here alpha is estimated as the infimum, and a low quantile is its relaxed version;
the share of posterior probabilities that we allow to be negative (with subsequent zeroing-out)
:param kwargs: dummy
:return: tuple (alpha, poster), e.g. (priors, posteriors) of N in U for the U sample, represented by diff
"""
if alpha_as_mean_poster and (alpha is not None):
poster = 1 - diff * (1 - alpha)
poster[poster < 0] = 0
cur_alpha = np.mean(poster)
if cur_alpha < alpha:
left_border = alpha
right_border = 1
else:
left_border = 0
right_border = alpha
poster_zero = 1 - diff
poster_zero[poster_zero < 0] = 0
if np.mean(poster_zero) > alpha:
left_border = -50
right_border = 0
# return 0, poster_zero
it = 0
try_alpha = cur_alpha
while (abs(cur_alpha - alpha) > kwargs.get('tol', 10 ** -5)) and (it < max_it):
try_alpha = (left_border + (right_border - left_border) / 2)
poster = 1 - diff * (1 - try_alpha)
poster[poster < 0] = 0
cur_alpha = np.mean(poster)
if cur_alpha > alpha:
right_border = try_alpha
else:
left_border = try_alpha
it += 1
alpha = try_alpha
if it >= max_it:
print('Exceeded maximal number of iterations in finding mean_poster=alpha')
else:
if alpha is None:
alpha = 1 - 1 / max(np.quantile(diff, 1 - quantile, interpolation='higher'), 1)
poster = 1 - diff * (1 - alpha)
poster[poster < 0] = 0
return alpha, poster | 5d7fe900e379418f38f6097ac8024984fc2e66fa | 15,213 |
def get_short_topic_name(test_run_name):
"""Returns the collection name for the DLQ.
Keyword arguments:
test_run_name -- the unique id for this test run
"""
return test_run_name[3:] if test_run_name.startswith("db.") else test_run_name | 6901ecd14b9cde9e0d8b7b62d11cf3c04b3b4a2e | 15,214 |
from shapely.geometry import Point, LineString
def cut_in_two(line):
"""
Cuts input line into two lines of equal length
Parameters
----------
line : shapely.LineString
input line
Returns
----------
list (LineString, LineString, Point)
two lines and the middle point cutting input line
"""
# Get final distance value
distance = line.length / 2
# Cuts a line in two at a distance from its starting point
if distance <= 0.0 or distance >= line.length:
return [LineString(line)]
coords = list(line.coords)
for i, p in enumerate(coords):
pd = line.project(Point(p))
if pd == distance:
return [LineString(coords[: i + 1]), LineString(coords[i:]), Point(p)]
if pd > distance:
cp = line.interpolate(distance)
return [
LineString(coords[:i] + [(cp.x, cp.y)]),
LineString([(cp.x, cp.y)] + coords[i:]),
cp,
] | 95df9b6b3995a930b6772a5137db3a14f10b4b26 | 15,215 |
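A quick usage sketch on a straight segment, just to illustrate the return convention (two halves plus the cutting point):
from shapely.geometry import LineString

line = LineString([(0, 0), (10, 0)])
first_half, second_half, midpoint = cut_in_two(line)
print(first_half.length, second_half.length, midpoint)  # 5.0 5.0 POINT (5 0)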
from importlib.machinery import SourceFileLoader
from os.path import join
def get_processor(aid):
"""
Return the processor module for a given achievement.
Args:
aid: the achievement id
Returns:
The processor module
"""
try:
path = get_achievement(aid)["processor"]
base_path = api.config.get_settings()["achievements"]["processor_base_path"]
return SourceFileLoader(path[:-3], join(base_path, path)).load_module()
except FileNotFoundError:
raise PicoException("Achievement processor is offline.") | 941e998e0e3ee81a6e22903976959e7696dd11ef | 15,216 |
import locale
import re
def parse_price(price):
"""
Convert string price to numbers
"""
if not price:
return 0
price = price.replace(',', '')
return locale.atoi(re.sub('[^0-9,]', "", price)) | bb90aa90b38e66adc73220665bb5e6458bfe5374 | 15,217 |
def render_content(template, context={}, request=None):
"""Renderiza el contenido para un email a partir de la plantilla y el contexto.
Deben existir las versiones ".html" y ".txt" de la plantilla.
Adicionalmente, si se recibe el request, se utilizará para el renderizado.
"""
if request:
context_class = RequestContext(request, context)
else:
context_class = Context(context)
template = Template(template)
return {
"text_content": template.render(context_class),
"html_content": template.render(context_class)
} | 0ef06bb3d42f737e9ae112a852460595b8bb1824 | 15,218 |
import numpy as np
def calculate_psi(expected, actual, buckettype="bins", breakpoints=None, buckets=10, axis=0):
"""Calculate the PSI (population stability index) across all variables
Args:
expected: numpy matrix of original values
actual: numpy matrix of new values
buckettype: strategy for creating buckets: 'bins' splits into evenly spaced bins,
'quantiles' splits into quantile buckets, 'customize' splits at user-supplied breakpoints
breakpoints: if buckettype is 'customize', pass a numpy array of breakpoints
buckets: number of quantiles to use in bucketing variables
axis: axis by which variables are defined, 0 for vertical, 1 for horizontal
Returns:
psi_values: ndarray of psi values for each variable
"""
def psi(expected_array, actual_array, buckets, breaks=None):
"""Calculate the PSI for a single variable
Args:
expected_array: numpy array of original values
actual_array: numpy array of new values
buckets: number of percentile ranges to bucket the values into
breaks: default None, customize breakpoints
Returns:
psi_value: calculated PSI value
"""
breakpoints = np.arange(0, buckets + 1) / (buckets) * 100
if buckettype == 'bins':
breakpoints = scale_range(breakpoints, np.min(expected_array), np.max(expected_array))
elif buckettype == 'quantiles':
breakpoints = np.stack([np.percentile(expected_array, b) for b in breakpoints])
elif buckettype == 'customize':
assert breaks is not None, "buckettype is customize, breakpoints should not be None"
breakpoints = breaks
expected_percents = np.histogram(expected_array, breakpoints)[0] / len(expected_array)
actual_percents = np.histogram(actual_array, breakpoints)[0] / len(actual_array)
psi_value = sum(sub_psi(expected_percents[i], actual_percents[i]) for i in range(0, len(expected_percents)))
return psi_value
if len(expected.shape) == 1:
psi_values = np.empty(len(expected.shape))
else:
psi_values = np.empty(expected.shape[axis])
for i in range(0, len(psi_values)):
if len(psi_values) == 1:
psi_values = psi(expected, actual, buckets, breakpoints)
elif axis == 0:
psi_values[i] = psi(expected[:,i], actual[:,i], buckets, breakpoints)
elif axis == 1:
psi_values[i] = psi(expected[i,:], actual[i,:], buckets, breakpoints)
return psi_values | d5250a93e784ce13cc24a1d16a88929d33426c1c | 15,219 |
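calculate_psi leans on two helpers, scale_range and sub_psi, that are not shown in this snippet. A minimal sketch of what they are conventionally assumed to do follows: a linear rescaling of the 0-100 percentile grid onto the observed value range, and the standard per-bucket PSI term (e - a) * ln(e / a) with a small floor to guard against empty buckets.
import numpy as np

def scale_range(inp, minimum, maximum):
    # Assumed helper: linearly rescale the percentile grid onto [minimum, maximum].
    inp = (inp - np.min(inp)) / (np.max(inp) - np.min(inp))
    return inp * (maximum - minimum) + minimum

def sub_psi(e_perc, a_perc, eps=1e-4):
    # Assumed helper: PSI contribution of a single bucket, floored to avoid log(0).
    e_perc = max(e_perc, eps)
    a_perc = max(a_perc, eps)
    return (e_perc - a_perc) * np.log(e_perc / a_perc)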
import subprocess
def accsum(reports):
"""
Runs accsum, returning a ClassReport (the final section in the report).
"""
report_bytes = subprocess.check_output(
[ACCSUM_BIN] + reports,
stderr=subprocess.STDOUT
)
contents = report_bytes.decode('UTF-8')
return ClassReport.from_accuracy_report(contents) | 7a1fe84a3f5699b75d62ab1e3a3b93cd1baef372 | 15,220 |
import requests
def get_sid(token):
"""
Obtain the sid from a given token, returns None if failed connection or other error preventing success
Do not use manually
"""
r = requests.get(url=str(URL + "app"), headers={'Accept': 'text/plain',
'authorization': token,
'Content-Type': 'application/json;charset=utf-8'})
cookie = r.headers.get('set-cookie')
# If successful, then the cookie was set
if cookie:
return cookie.split("connect.sid=", 1)[1].strip("; Path=/; HttpOnly")
return None | eaa26681f988b8c27fecf489bbf1bb1d5c460810 | 15,221 |
from urllib.parse import quote_plus
def generer_lien(mots, commande="http://www.lextutor.ca/cgi-bin/conc/wwwassocwords.pl?lingo=French&KeyWordFormat=&Maximum=10003&LineWidth=100&Gaps=no_gaps&store_dic=&is_refire=true&Fam_or_Word=&Source=http%3A%2F%2Fwww.lextutor.ca%2Fconc%2Ffr%2F&unframed=true&SearchType=equals&SearchStr={0}&Corpus=Fr_le_monde.txt&ColloSize=&SortType=right&AssocWord=&Associate={1}",contextes=["right","left"]):
"""
Returns a dict of links keyed by context,
e.g. {'right': {'ce': right_link}, 'left': {'ce': left_link}}.
"""
liens = {}
for mot in mots:
for contexte in contextes:
command = commande.format(quote_plus(mot,encoding="ISO 8859-1"),contexte)
liens.update({contexte:{mot:command}})
return liens | 0a646603fb538468a4ae29b102cb8250479fedce | 15,222 |
import numpy
def ring_forming_scission_grid(zrxn, zma, npoints=(7,)):
""" Build forward WD grid for a ring forming scission reaction
# the following allows for a 2-d grid search in the initial ts_search
# for now try 1-d grid and see if it is effective
"""
# Obtain the scan coordinate
scan_name = ring_forming_scission_scan_coordinate(zrxn, zma)
# Build the grid
npoints1 = npoints[0]
brk_bnd_len = _ts_bnd_len(zma, scan_name)
if brk_bnd_len is not None:
r1min = brk_bnd_len + 0.1 * phycon.ANG2BOHR
r1max = brk_bnd_len + 0.7 * phycon.ANG2BOHR
else:
r1min = (1.54 + 0.1) * phycon.ANG2BOHR
r1max = (1.54 + 0.7) * phycon.ANG2BOHR
grid1 = numpy.linspace(r1min, r1max, npoints1)
grid = tuple(val.item() for val in grid1)
return grid | de6e521ae28603b5afea5148f98a65f578e7b349 | 15,223 |
import re
def parse_proj(lines):
""" parse a project file, looking for section definitions """
section_regex_start = re.compile(
r'\s*([0-9A-F]+) /\* ([^*]+) \*/ = {$', re.I)
section_regex_end = re.compile(r'\s*};$')
children_regex = re.compile(r'\s*([0-9A-F]+) /\* ([^*]+) \*/,', re.I)
children_regex_start = re.compile(r'\s*children = \(')
children_regex_end = re.compile(r'\s*\);')
group_regex = re.compile(r'\s*sourceTree = ([^;]+);')
file_reference_regex = re.compile(
r'\s*([0-9A-F]+) /\* ([^*]+) \*/ = .* ' +
r'path = "?([^;"]+)"?; sourceTree = ([^;]+);',
re.I)
entries = {}
current_section = None
got_children = False
for line in lines:
if current_section:
end = section_regex_end.match(line)
if end:
current_section = None
continue
# look for the children marker, or append to children
if got_children:
if children_regex_end.match(line):
got_children = False
else:
child_match = children_regex.match(line)
if child_match:
id = child_match.groups()[0]
name = child_match.groups()[1]
current_section.add_link(Link(id, name))
elif children_regex_start.match(line):
got_children = True
else:
# no children, try to match a sourceTree = ...; line
group = group_regex.match(line)
if group:
current_section.location = group.groups()[0]
else:
# try for a new section
new_section_matches = section_regex_start.match(line)
if new_section_matches:
id = new_section_matches.groups()[0]
name = new_section_matches.groups()[1]
current_section = Section(id, name)
entries[id] = current_section
else:
# no new section, check for a plain FileReference
file_ref_captures = file_reference_regex.match(line)
if file_ref_captures:
id = file_ref_captures.groups()[0]
name = file_ref_captures.groups()[1]
path = file_ref_captures.groups()[2]
location = file_ref_captures.groups()[3]
entries[id] = FileReference(id, name, path, location)
return entries | 28e979a6a3c82f5669704375e8a6104f406af33f | 15,224 |
import torch
def mot_decode(heat,
wh,
reg=None,
cat_spec_wh=False,
K=100):
"""
Decode multi-object detection results (boxes, scores, classes) from the network outputs.
"""
batch, cat, height, width = heat.size() # N×C×H×W
# heat = torch.sigmoid(heat)
# perform nms on heatmaps
heat = _nms(heat) # by default applies 3×3 max pooling, so candidate peaks drop to ~1/9 of the feature map
scores, inds, clses, ys, xs = _topk(scores=heat, K=K)
if reg is not None:
reg = _tranpose_and_gather_feat(reg, inds)
reg = reg.view(batch, K, 2)
xs = xs.view(batch, K, 1) + reg[:, :, 0:1]
ys = ys.view(batch, K, 1) + reg[:, :, 1:2]
else:
xs = xs.view(batch, K, 1) + 0.5
ys = ys.view(batch, K, 1) + 0.5
wh = _tranpose_and_gather_feat(wh, inds)
if cat_spec_wh:
wh = wh.view(batch, K, cat, 2)
clses_ind = clses.view(batch, K, 1, 1).expand(batch, K, 1, 2).long()
wh = wh.gather(2, clses_ind).view(batch, K, 2)
else:
wh = wh.view(batch, K, 2)
clses = clses.view(batch, K, 1).float() # object classes
scores = scores.view(batch, K, 1)
bboxes = torch.cat([xs - wh[..., 0:1] / 2, # left
ys - wh[..., 1:2] / 2, # top
xs + wh[..., 0:1] / 2, # right
ys + wh[..., 1:2] / 2], # down
dim=2)
detections = torch.cat([bboxes, scores, clses], dim=2)
return detections, inds | 22d5c8f85bd90936c46faf73ecb6c520466fb6da | 15,225 |
import pandas as pd
from rdkit import Chem
def get_descriptors(smiles):
""" Use RDkit to get molecular descriptors for the given smiles string """
mol = Chem.MolFromSmiles(smiles)
return pd.Series({name: func(mol) for name, func in descList.items()}) | 2107b4e1d13c2a7a02e15392fe38e1448d1772c2 | 15,226 |
def mro(*bases):
"""Calculate the Method Resolution Order of bases using the C3 algorithm.
Suppose you intended creating a class K with the given base classes. This
function returns the MRO which K would have, *excluding* K itself (since
it doesn't yet exist), as if you had actually created the class.
Another way of looking at this, if you pass a single class K, this will
return the linearization of K (the MRO of K, *including* itself).
"""
seqs = [list(C.__mro__) for C in bases] + [list(bases)]
res = []
while True:
non_empty = list(filter(None, seqs))
if not non_empty:
# Nothing left to process, we're done.
return tuple(res)
for seq in non_empty: # Find merge candidates among seq heads.
candidate = seq[0]
not_head = [s for s in non_empty if candidate in s[1:]]
if not_head:
# Reject the candidate.
candidate = None
else:
break
if not candidate:
raise TypeError("inconsistent hierarchy, no C3 MRO is possible")
res.append(candidate)
for seq in non_empty:
# Remove candidate.
if seq[0] == candidate:
del seq[0] | 87d259d00b073c8728833d8608fed5e4f484a987 | 15,227 |
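A small sanity check of the linearization against CPython's own MRO (the class hierarchy below is illustrative only):
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass

# A single class argument returns its full linearization, including the class itself.
assert mro(D) == D.__mro__                # (D, B, C, A, object)
# Passing prospective bases returns the MRO a new subclass of (B, C) would get, excluding itself.
assert mro(B, C) == D.__mro__[1:]         # (B, C, A, object)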
def ends_with(s, suffix, ignore_case=False):
"""
suffix: str, list, or tuple
"""
if is_str(suffix):
suffix = [suffix]
suffix = list(suffix)
if ignore_case:
for idx, suf in enumerate(suffix):
suffix[idx] = to_lowercase(suf)
s = to_lowercase(s)
suffix = tuple(suffix)
return s.endswith(suffix) | 4b92596f95bb482a196bf2b8a07a6a954f526045 | 15,228 |
def compareVersion(self, version1, version2):
"""
:type version1: str
:type version2: str
:rtype: int
"""
# split each version string into its integer components
version1 = [int(val) for val in version1.split(".")]
version2 = [int(val) for val in version2.split(".")]
if len(version1) > len(version2):
min_version = version2
max_version = version1
else:
min_version = version1
max_version = version2
# Compare up to min character
for i in range(len(min_version)):
if version1[i] > version2[i]:
return 1
elif version1[i] < version2[i]:
return -1
if len(version1) == len(version2):
return 0
for j in range(i + 1, len(max_version)):
if max_version[j] > 0:
return 1 if max_version == version1 else - 1
return 0 | 70ff77595f61620e1dac32d29be510e0906b505b | 15,229 |
import glob
import re
import pandas as pd
def create_capital():
""" Use fy and p-t-d capital sets and ref sets to make capital datasets """
adopted = glob.glob(conf['temp_data_dir'] \
+ "/FY*_ADOPT_CIP_BUDGET.xlsx")
proposed = glob.glob(conf['temp_data_dir'] \
+ "/FY*_PROP_CIP_BUDGET.xlsx")
todate = glob.glob(conf['temp_data_dir'] \
+ "/FY*_2DATE_CIP_BUDGET.xlsx")
budgets = adopted + proposed + todate
fund_ref = pd.read_csv(prod_path \
+ "/budget_reference_funds_datasd_v1.csv",dtype={'fund_number':str})
proj_ref = pd.read_csv(prod_path \
+ "/budget_reference_projects_datasd_v1.csv",dtype={'project_number':str})
accounts_ref = pd.read_csv(prod_path \
+ "/budget_reference_accounts_datasd_v1.csv",dtype={'account_number':str})
for count, budget in enumerate(budgets):
fy_pattern = re.compile(r'([0-9][0-9])')
this_fy = fy_pattern.findall(budget)
if "2DATE" in budget:
out_fname = prod_path \
+ "/budget_capital_ptd_FY{}_datasd_v1.csv".format(this_fy[0])
elif "PROP" in budget:
out_fname = prod_path \
+ "/budget_capital_FY{}_prop_datasd_v1.csv".format(this_fy[0])
else:
out_fname = prod_path \
+ "/budget_capital_FY{}_datasd_v1.csv".format(this_fy[0])
df = pd.read_excel(budget)
df = df.iloc[:, [0,1,2,3]]
df.columns = ['amount','code','project_number','object_number']
df['code'] = df['code'].astype(str)
df['project_number'] = df['project_number'].astype(str)
df['object_number'] = df['object_number'].astype(str)
df = pd.merge(df,
fund_ref[['fund_type','fund_number']],
left_on='code',
right_on='fund_number',
how='left')
df = pd.merge(df,
proj_ref[['asset_owning_dept','project_name','project_number']],
left_on='project_number',
right_on='project_number',
how='left')
df = pd.merge(df,
accounts_ref[['account','account_number']],
left_on='object_number',
right_on='account_number',
how='left')
df = df[['amount',
'fund_type',
'fund_number',
'asset_owning_dept',
'project_name',
'project_number',
'account',
'account_number']]
general.pos_write_csv(df,out_fname)
return "Successfully created capital budgets" | 64abc2c73e1455d42b94039cf857534a03075c41 | 15,230 |
import numpy as np
def gen_gt_from_quadrilaterals(gt_quadrilaterals, input_gt_class_ids, image_shape, width_stride, box_min_size=3):
"""
Generate fixed-width gt boxes from gt quadrilaterals.
:param gt_quadrilaterals: GT quadrilateral coordinates, [n,(x1,y1,x2,y2,x3,y3,x4,y4)]
:param input_gt_class_ids: GT quadrilateral class ids, usually all 1, [n]
:param image_shape:
:param width_stride: stride of the split, usually 16
:param box_min_size: minimum size of the GT boxes after splitting
:return:
gt_boxes:[m,(y1,x1,y2,x2)]
gt_class_ids: [m]
"""
h, w = list(image_shape)[:2]
x_array = np.arange(0, w + 1, width_stride, np.float32) # x coordinates at fixed width intervals
# min and max x of each quadrilateral
x_min_np = np.min(gt_quadrilaterals[:, ::2], axis=1)
x_max_np = np.max(gt_quadrilaterals[:, ::2], axis=1)
gt_boxes = []
gt_class_ids = []
for i in np.arange(len(gt_quadrilaterals)):
xs = get_xs_in_range(x_array, x_min_np[i], x_max_np[i]) # x coordinates that fall inside the quadrilateral
ys_min, ys_max = get_min_max_y(gt_quadrilaterals[i], xs)
# print("xs:{}".format(xs))
# generate fixed-width gt boxes for each quadrilateral
for j in range(len(xs) - 1):
x1, x2 = xs[j], xs[j + 1]
y1, y2 = np.min(ys_min[j:j + 2]), np.max(ys_max[j:j + 2])
gt_boxes.append([y1, x1, y2, x2])
gt_class_ids.append(input_gt_class_ids[i])
gt_boxes = np.reshape(np.array(gt_boxes), (-1, 4))
gt_class_ids = np.reshape(np.array(gt_class_ids), (-1,))
# filter out boxes whose height is too small
height = gt_boxes[:, 2] - gt_boxes[:, 0]
width = gt_boxes[:, 3] - gt_boxes[:, 1]
indices = np.where(np.logical_and(height >= 8, width >= 2))
return gt_boxes[indices], gt_class_ids[indices] | 4dfd81bd7a0f20334385bc9e1c9681d371e6f609 | 15,231 |
def monotonic(l: list):
"""Return True is list elements are monotonically increasing or decreasing.
>>> monotonic([1, 2, 4, 20])
True
>>> monotonic([1, 20, 4, 10])
False
>>> monotonic([4, 1, 0, -10])
True
"""
#[SOLUTION]
if l == sorted(l) or l == sorted(l, reverse=True):
return True
return False | 1f8a34943e288ea9695f040be91f18cfe82a6e48 | 15,232 |
import time
DEBUG = False  # verbosity flag; the original "from re import DEBUG" pulled in an unrelated (truthy) regex flag
def get_region_dimm_list(region):
"""
returns list of pmem dimms assocaited with pmem region
"""
name = 'get_region_dimm_list()'
tic = time.perf_counter()
global ndctl
dimm_list = []
# if DEBUG: print("DEBUG: Function:", __name__, "Region:", region )
# if VERBOSE: print(' getting:', __name__, end="...")
for r in range(len(ndctl['regions'])):
# if this region matches arg, get DIMM mappings
if ndctl['regions'][r]['dev'] == region:
for d in range(len(ndctl['regions'][r]['mappings'])):
if DEBUG: print(' ndctl[regions][r]mappings', ndctl['regions'][r]['mappings'][d]['dimm'])
dimm_list.append(ndctl['regions'][r]['mappings'][d]['dimm'])
continue
# if VERBOSE: print('Done')
# if DEBUG: print("Debug:", __name__, region, "DIMMS", dimm_list)
toc = time.perf_counter()
delta_t = toc - tic
td = {'name': name, "elapsed": delta_t, 'tic': tic, 'toc': toc}
timers.append(td)
return dimm_list | b9ecce7d4ce7cc34fcb3e8acd84252e30cadc141 | 15,233 |
from os import path
def readme():
"""Get the long description from the README file."""
with open(path.join(project_path, 'README.rst'), encoding='utf-8') as f:
return f.read() | 043bbcfd187340ec9101410f5c22f831de10fe6d | 15,234 |
import os
def RFR_dict(input_date: str = None, cache: dict = {}) -> dict:
"""
Returns a dict with url and filenames from the EIOPA website based on the
input_date
>>> RFR_dict(datetime(2018,1,1))
{'input_date': datetime.datetime(2018, 1, 1, 0, 0),
'reference_date': '20171231',
'url': 'https://eiopa.europa.eu/Publications/Standards/',
'path_zipfile': '',
'name_zipfile': 'EIOPA_RFR_20171231.zip',
'path_excelfile': '',
'name_excelfile': 'EIOPA_RFR_20171231_Term_Structures.xlsx'}
Args:
input_date: required date
cache: the cache with the data
Returns
The updated cache with the data
"""
cache = RFR_reference_date(input_date, cache)
reference_date = cache["reference_date"]
full_url = eiopa_link(cache["input_date"], data_type="rfr")
cache["url"] = os.path.dirname(full_url)
cache["name_zipfile"] = os.path.basename(full_url)
cache["name_excelfile"] = (
"EIOPA_RFR_" + reference_date + "_Term_Structures" + ".xlsx"
)
cache["name_excelfile_spreads"] = (
"EIOPA_RFR_" + reference_date + "_PD_Cod" + ".xlsx"
)
return cache | 5069be9ffd95ffd0e3a0451846635db3789694e7 | 15,235 |
import math
def get_weight(stats):
"""
Return a data point weight for the result.
"""
if stats is None or 'ci_99_a' not in stats or 'ci_99_b' not in stats:
return None
try:
a = stats['ci_99_a']
b = stats['ci_99_b']
if math.isinf(a) or math.isinf(b):
# Infinite interval is due to too few samples --- consider
# weight as missing
return None
return 2 / abs(b - a)
except ZeroDivisionError:
return None | 7e44032bc9e51e5fe7522c3f51ead5e733d4107a | 15,236 |
import torch
from torch import Tensor
from sklearn.linear_model import BayesianRidge
def get_true_posterior(X: Tensor, y: Tensor) -> (Tensor, Tensor, float, float, float):
"""
Get the parameters of the true posterior of a linear regression model fit to the given data.
Args:
X: The features, of shape (n_samples, n_features).
y: The targets, of shape (n_samples,).
Returns:
mean: The posterior mean, of shape (n_features,).
covar: The posterior covariance, of shape (n_features, n_features).
bias: The posterior bias.
alpha: The precision of the Gaussian prior.
beta: The precision of Gaussian target noise.
"""
br = BayesianRidge()
br.fit(X.numpy(), y.numpy())
mean = torch.from_numpy(br.coef_).float()
covar = torch.from_numpy(br.sigma_).float()
bias = br.intercept_
alpha = br.lambda_
beta = br.alpha_
return mean, covar, bias, alpha, beta | 3431513d52905ec51bbe7b694af02a8274cbf48e | 15,237 |
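A quick usage sketch on synthetic data; the attribute names follow scikit-learn's BayesianRidge, and the true weights below are illustrative:
import torch

torch.manual_seed(0)
X = torch.randn(200, 3)
true_w = torch.tensor([1.5, -2.0, 0.5])
y = X @ true_w + 0.1 * torch.randn(200)

mean, covar, bias, alpha, beta = get_true_posterior(X, y)
print(mean)                      # posterior mean, close to [1.5, -2.0, 0.5]
print(covar.shape, bias, alpha, beta)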
def findmax(engine,user,measure,depth):
"""Returns a list of top (user,measure) pairs, sorted by measure, up to a given :depth"""
neighbors = engine.neighbors(user)
d = {v:measure(user,v) for v in neighbors}
ranked = sorted(neighbors,key=lambda v:d[v],reverse=True)
return list((v,d[v]) for v in ranked[:depth]) | ecf6d72f8c689f1b7af78a714e55d8fbfe57f2ad | 15,238 |
from typing import OrderedDict
def cart_update(request, pk):
"""
Add/Remove single product (possible multiple qty of product) to cart
:param request: Django's HTTP Request object,
pk: Primary key of
products to be added to cart
:return: Success message
"""
if request.method == 'GET':
sess = request.session
qty = request.GET.get('qty', False)
if qty:
# Initialize a cart and its qty in session if they don't exist
sess['cart_qty'] = sess.get('cart_qty', 0) + int(qty)
sess['cart'] = sess.get('cart', OrderedDict())
# In case it is an add-to-cart and the product is not already in the cart
new_cart_item = {'qty': 0, 'pk': str(pk)}
# Update cart item quantity of new/existing item
sess['cart'][str(pk)] = sess['cart'].get(str(pk), new_cart_item)
new_qty = sess['cart'][str(pk)]['qty'] + int(qty)
new_qty_above_max = Product.objects.get(pk=pk).quantity < new_qty
# import pdb; pdb.set_trace()
if not new_qty_above_max:
# Sets new quantity to 0 in case quantity has gone negative
sess['cart'][str(pk)]['qty'] = int((abs(new_qty) + new_qty) / 2)
return JsonResponse({'success': True})
return JsonResponse({
'success': False,
'msg': 'Max quantity of this product has already been added.'
}) | 1673b299a41bdccaf6d0a27b15fbf85a0bb7028f | 15,239 |
from typing import Union
from typing import List
def hyperopt_cli(
config: Union[str, dict],
dataset: str = None,
training_set: str = None,
validation_set: str = None,
test_set: str = None,
training_set_metadata: str = None,
data_format: str = None,
experiment_name: str = "experiment",
model_name: str = "run",
# model_load_path=None,
# model_resume_path=None,
skip_save_training_description: bool = False,
skip_save_training_statistics: bool = False,
skip_save_model: bool = False,
skip_save_progress: bool = False,
skip_save_log: bool = False,
skip_save_processed_input: bool = False,
skip_save_unprocessed_output: bool = False,
skip_save_predictions: bool = False,
skip_save_eval_stats: bool = False,
skip_save_hyperopt_statistics: bool = False,
output_directory: str = "results",
gpus: Union[str, int, List[int]] = None,
gpu_memory_limit: int = None,
allow_parallel_threads: bool = True,
callbacks: List[Callback] = None,
backend: Union[Backend, str] = None,
random_seed: int = default_random_seed,
debug: bool = False,
**kwargs,
):
"""Searches for optimal hyperparameters.
# Inputs
:param config: (Union[str, dict]) in-memory representation of
config or string path to a YAML config file.
:param dataset: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing the entire dataset to be used for training.
If it has a split column, it will be used for splitting (0 for train,
1 for validation, 2 for test), otherwise the dataset will be
randomly split.
:param training_set: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing training data.
:param validation_set: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing validation data.
:param test_set: (Union[str, dict, pandas.DataFrame], default: `None`)
source containing test data.
:param training_set_metadata: (Union[str, dict], default: `None`)
metadata JSON file or loaded metadata. Intermediate preprocessed
structure containing the mappings of the input
dataset created the first time an input file is used in the same
directory with the same name and a '.meta.json' extension.
:param data_format: (str, default: `None`) format to interpret data
sources. Will be inferred automatically if not specified. Valid
formats are `'auto'`, `'csv'`, `'excel'`, `'feather'`,
`'fwf'`, `'hdf5'` (cache file produced during previous training),
`'html'` (file containing a single HTML `<table>`), `'json'`, `'jsonl'`,
`'parquet'`, `'pickle'` (pickled Pandas DataFrame), `'sas'`, `'spss'`,
`'stata'`, `'tsv'`.
:param experiment_name: (str, default: `'experiment'`) name for
the experiment.
:param model_name: (str, default: `'run'`) name of the model that is
being used.
:param skip_save_training_description: (bool, default: `False`) disables
saving the description JSON file.
:param skip_save_training_statistics: (bool, default: `False`) disables
saving training statistics JSON file.
:param skip_save_model: (bool, default: `False`) disables
saving model weights and hyperparameters each time the model
improves. By default Ludwig saves model weights after each epoch
the validation metric improves, but if the model is really big
that can be time consuming. If you do not want to keep
the weights and just find out what performance a model can get
with a set of hyperparameters, use this parameter to skip it,
but the model will not be loadable later on and the returned model
will have the weights obtained at the end of training, instead of
the weights of the epoch with the best validation performance.
:param skip_save_progress: (bool, default: `False`) disables saving
progress each epoch. By default Ludwig saves weights and stats
after each epoch for enabling resuming of training, but if
the model is really big that can be time consuming and will uses
twice as much space, use this parameter to skip it, but training
cannot be resumed later on.
:param skip_save_log: (bool, default: `False`) disables saving
TensorBoard logs. By default Ludwig saves logs for the TensorBoard,
but if it is not needed turning it off can slightly increase the
overall speed.
:param skip_save_processed_input: (bool, default: `False`) if input
dataset is provided it is preprocessed and cached by saving an HDF5
and JSON files to avoid running the preprocessing again. If this
parameter is `False`, the HDF5 and JSON file are not saved.
:param skip_save_unprocessed_output: (bool, default: `False`) by default
predictions and their probabilities are saved in both raw
unprocessed numpy files containing tensors and as postprocessed
CSV files (one for each output feature). If this parameter is True,
only the CSV ones are saved and the numpy ones are skipped.
:param skip_save_predictions: (bool, default: `False`) skips saving test
predictions CSV files
:param skip_save_eval_stats: (bool, default: `False`) skips saving test
statistics JSON file
:param skip_save_hyperopt_statistics: (bool, default: `False`) skips saving
hyperopt stats file.
:param output_directory: (str, default: `'results'`) the directory that
will contain the training statistics, TensorBoard logs, the saved
model and the training progress files.
:param gpus: (list, default: `None`) list of GPUs that are available
for training.
:param gpu_memory_limit: (int, default: `None`) maximum memory in MB to
allocate per GPU device.
:param allow_parallel_threads: (bool, default: `True`) allow TensorFlow
to use multithreading parallelism to improve performance at
the cost of determinism.
:param callbacks: (list, default: `None`) a list of
`ludwig.callbacks.Callback` objects that provide hooks into the
Ludwig pipeline.
:param backend: (Union[Backend, str]) `Backend` or string name
of backend to use to execute preprocessing / training steps.
:param random_seed: (int: default: 42) random seed used for weights
initialization, splits and any other random function.
:param debug: (bool, default: `False`) if `True` turns on `tfdbg` with
`inf_or_nan` checks.
**kwargs:
# Return
:return" (`None`)
"""
return hyperopt(
config=config,
dataset=dataset,
training_set=training_set,
validation_set=validation_set,
test_set=test_set,
training_set_metadata=training_set_metadata,
data_format=data_format,
experiment_name=experiment_name,
model_name=model_name,
# model_load_path=model_load_path,
# model_resume_path=model_resume_path,
skip_save_training_description=skip_save_training_description,
skip_save_training_statistics=skip_save_training_statistics,
skip_save_model=skip_save_model,
skip_save_progress=skip_save_progress,
skip_save_log=skip_save_log,
skip_save_processed_input=skip_save_processed_input,
skip_save_unprocessed_output=skip_save_unprocessed_output,
skip_save_predictions=skip_save_predictions,
skip_save_eval_stats=skip_save_eval_stats,
skip_save_hyperopt_statistics=skip_save_hyperopt_statistics,
output_directory=output_directory,
gpus=gpus,
gpu_memory_limit=gpu_memory_limit,
allow_parallel_threads=allow_parallel_threads,
callbacks=callbacks,
backend=backend,
random_seed=random_seed,
debug=debug,
**kwargs,
) | 8abca7e92e64216f50cab5c1d3757e01111e9512 | 15,240 |
import numpy as np
def mlp_gradient(x, y, ws, bs, phis, alpha):
"""
Return the gradients of the regularized cost with respect to the weights and biases of each layer.
:param x: a list of lists representing the x matrix.
:param y: a list of lists of output values.
:param ws: a list of weight matrices (one for each layer)
:param bs: a list of biases (one for each layer)
:param phis: a list of activation functions
:param alpha: the L2 regularization coefficient
:return: A tuple (gradients w.r.t. the weights, gradients w.r.t. the biases), one entry per layer k=1..l
"""
hs = mlp_feed_forward(x, ws, bs, phis)
D = mlp_propagate_error(x, y, ws, bs, phis, hs)
result_w = []
result_b = []
w_1 = np.dot(np.transpose(x), D[0])
step = np.multiply(alpha, ws[0])
w_1 = np.add(w_1, step)
w_1 = np.ndarray.tolist(w_1)
result_w.append(w_1)
for layers in range(1, len(ws)):
w_2 = np.dot(np.transpose(hs[layers]), D[layers])
w_2 = np.add(w_2, np.multiply(alpha, ws[layers]))
result_w.append(w_2)
for layers in range(len(ws)):
ones = np.ones((len(x), 1))
b_1 = np.dot(np.transpose(ones), D[layers])
result_b.append(b_1)
result_w = np.reshape(result_w, (1, -1))
return result_w, result_b | 0e148d5b3b343a982d9332637c4f51be8b3afa3b | 15,241 |
import torch
def src_one(y: torch.Tensor, D: torch.Tensor, *,
k=None, device=None) -> torch.Tensor:
"""
y = Dx
:param y: image (h*w)
:param D: dict (class_sz, train_im_sz, h*w)
:param k:
:param device: pytorch device
:return: predict tensor(int)
"""
assert y.dim() == 1
assert D.dim() == 3
assert y.size(dim=0) == D.size(dim=2)
class_sz, train_im_sz, n_features = D.shape # n_features=h*w
D_x = D.view(class_sz * train_im_sz, n_features)
D_x = D_x.permute([1, 0]) # D_x(n_features, class_sz*train_im_sz)
# y(n_features)
a = omp(D_x, y, k=k, device=device) # a(class_sz*train_im_sz)
X_i = D.permute([0, 2, 1]) # X_i(class_sz, h*w, train_im_sz)
a_i = a.view(class_sz, train_im_sz, 1) # a(class_sz, train_im_sz, 1)
y_p = torch.matmul(X_i, a_i).view(class_sz, n_features)
e_y = torch.mean((y - y_p) ** 2, dim=1)
return torch.argmin(e_y) | b779e3313fb707bb6659fe48f59b030b9c9ae7d3 | 15,242 |
from typing import Union
from typing import Sequence
import numpy as np
import pandas as pd
def average_false_positive_score(
y_true: Union[Sequence[int], np.ndarray, pd.Series],
y_pred: Union[Sequence[int], np.ndarray, pd.Series],
) -> float:
"""Calculates the average false positive score. Used for when we have more than 2 classes and want our models'
average performance for each class
Parameters
----------
y_true: list or array like
The true, or the expected, values of our problem
y_pred: list or array like
The predicted values of our problem
Returns
-------
The average of each false positive score for each group/class
"""
if len(np.unique(y_true)) < 3:
return false_positive_score(y_true, y_pred)
else:
overall_score = 0
unique_classes = np.unique(y_true)
for pos_class in unique_classes:
overall_score += false_positive_score(
y_true, y_pred, problem="multiclass", positive_class=pos_class
)
return overall_score / len(unique_classes) | 4b789381e25efffc0aa811287bab8299edf6b962 | 15,243 |
from dash import html
def display_text_paragraph(text: str):
"""Displays paragraph of text (e.g. explanation, plot interpretation)
Args:
text (str): Informational text
Returns:
html.Small: Wrapper for text paragraph
"""
return html.P(children=[text],
style={'font-size': '14px',
'white-space': 'pre-wrap'
}) | 8c4ae8f7b606b81726149891fb5db624647ba484 | 15,244 |
from sqlalchemy import Numeric
def is_numeric(_type) -> bool:
"""
Check if sqlalchemy _type is derived from Numeric
"""
return issubclass(_type.__class__, Numeric) | 1d604873e4043206b50ddc09c691331c4c50c49c | 15,245 |
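A short usage sketch against SQLAlchemy's built-in types (Float derives from Numeric; Integer and String do not):
from sqlalchemy import Float, Integer, Numeric, String

print(is_numeric(Float()))    # True
print(is_numeric(Numeric()))  # True
print(is_numeric(Integer()))  # False
print(is_numeric(String()))   # False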
import numpy as np
import matplotlib.pyplot as plt
def make_generic_time_plotter(
retrieve_data,
label,
dt,
time_unit=None,
title=None,
unit=None,
):
"""Factory function for creating plotters that can plot data over time.
The function returns a function which can be called whenever the plot should be drawn.
This function takes no arguments and will create a new figure and plot the given data when called.
This function doesn't call plt.show() so this must be done by the calling code.
:param retrieve_data: function that returns the data to plot over time when called with no arguments.
:param str label: Label representing the data.
:param number dt: delta time between time steps in data.
:param str time_unit: unit of time, e.g. 'fs'.
:param str title: title of plot.
:param str unit: unit of data, e.g. 'K'.
"""
def plotter():
data = retrieve_data()
t = np.arange(0, len(data)*dt, dt)
fig = plt.figure()
ax = plt.axes()
plt.title(title if title else label)
plt.xlabel(f"Time [{time_unit}]" if time_unit else f"Time")
plt.ylabel(f"{label} [{unit}]" if unit else f"{label}")
ax.plot(t, data, marker = 'o')
return plotter | 3fa391a94973e5b98394e684d8e4018fa16811df | 15,246 |
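A quick usage sketch with synthetic data; the label, units, and time step are illustrative:
import numpy as np
import matplotlib.pyplot as plt

temps = 300.0 + np.cumsum(np.random.randn(50))   # fake temperature trace
plot_temps = make_generic_time_plotter(
    lambda: temps, label="Temperature", dt=0.5, time_unit="fs", unit="K")
plot_temps()   # builds the figure but does not display it
plt.show()     # display is left to the caller, as the docstring notes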
def registration(request):
"""Registration product page
"""
if request.method == 'POST':
user_form = UserRegistrationForm(request.POST)
if user_form.is_valid():
# Create a new user object but avoid saving it yet
new_user = user_form.save(commit=False)
# Set the chosen password
new_user.set_password(user_form.cleaned_data['password'])
# Save the User object
new_user.save()
return render(request, 'registration/register_done.html', {'new_user': new_user})
else:
user_form = UserRegistrationForm()
return render(request, 'registration/registr.html', {'user_form': user_form}) | d176a5027058124dfd30a247f924776a87f7aba3 | 15,247 |
import numpy as np
def error_measure(predictions, labels):
""" calculate mean squared error of the predictions """
return np.sum(np.power(predictions - labels, 2)) / (predictions.shape[0]) | 135b3b90047895ecff90aed6f4a37d73ef0ddd17 | 15,248 |
def add3(self, x, y):
"""Celery task: add numbers."""
return x + y | 0d1017953dcdd1a0791afe291ce005247547f198 | 15,249 |
def zscore(dat, mean, sigma):
"""Calculates zscore of a data point in (or outside of) a dataset
zscore: how many sigmas away is a value from the mean of a dataset?
Parameters
----------
dat: float
Data point
mean: float
Mean of dataset
sigma: float
Sigma of dataset
"""
zsc = (dat-mean)/sigma
return zsc | b11216e50632e2024af0a389184d5e1dba7ed4fd | 15,250 |
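A one-line worked example: a value of 12 in a distribution with mean 10 and sigma 2 sits exactly one sigma above the mean.
print(zscore(12.0, 10.0, 2.0))   # 1.0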
from typing import OrderedDict
from typing import Tuple
from re import S
def _create_ast_bilinear_form(terminal_expr, atomic_expr_field,
tests, d_tests,
trials, d_trials,
fields, d_fields, constants,
nderiv, dim, mapping, d_mapping, is_rational_mapping, spaces, mapping_space, mask, tag, is_parallel,
**kwargs):
"""
This function creates the assembly function of a bilinear form
Parameters
----------
terminal_expr : <Matrix>
atomic representation of the bilinear form
atomic_expr_field: <dict>
dict of atomic expressions of fields
tests : <list>
list of tests functions
d_tests : <dict>
dictionary that contains the symbolic spans and basis values of each test function
trials : <list>
list of trial functions
d_trials: <list>
dictionary that contains the symbolic spans and basis values of each trial function
fields : <list>
list of fields
constants : <list>
list of constants
nderiv : int
the order of the bilinear form
dim : int
number of dimension
mapping : <Mapping>
Sympde Mapping object
d_mapping : <dict>
dictionary that contains the symbolic spans and basis values of the mapping
is_rational_mapping : <bool>
takes the value of True if the mapping is rational
spaces : <list>
list of sympde symbolic test and trial spaces
mask : <int|None>
the masked direction in case of boundary domain
tag : <str>
tag to be added to variable names
is_parallel : <bool>
True if the domain is distributed
Returns
-------
node : DefNode
represents a function definition node that computes the assembly
"""
pads = variables(('pad1, pad2, pad3'), dtype='int')[:dim]
b0s = variables(('b01, b02, b03'), dtype='int')[:dim]
e0s = variables(('e01, e02, e03'), dtype='int')[:dim]
g_quad = GlobalTensorQuadrature(False)
l_quad = LocalTensorQuadrature(False)
quad_order = kwargs.pop('quad_order', None)
# ...........................................................................................
g_span = OrderedDict((u,d_tests[u]['span']) for u in tests)
f_span = OrderedDict((f,d_fields[f]['span']) for f in fields)
if mapping_space:
m_span = OrderedDict((f,d_mapping[f]['span']) for f in d_mapping)
else:
m_span = {}
m_trials = OrderedDict((u,d_trials[u]['multiplicity']) for u in trials)
m_tests = OrderedDict((v,d_tests[v]['multiplicity']) for v in tests)
lengths_trials = OrderedDict((u,LengthDofTrial(u)) for u in trials)
lengths_tests = OrderedDict((v,LengthDofTest(v)) for v in tests)
lengths_outer_tests = OrderedDict((v,LengthOuterDofTest(v)) for v in tests)
lengths_inner_tests = OrderedDict((v,LengthInnerDofTest(v)) for v in tests)
lengths_fields = OrderedDict((f,LengthDofTest(f)) for f in fields)
# ...........................................................................................
quad_length = LengthQuadrature()
el_length = LengthElement()
lengths = [el_length, quad_length]
# ...........................................................................................
geo = GeometryExpressions(mapping, nderiv)
g_coeffs = {f:[MatrixGlobalBasis(i,i) for i in expand([f])] for f in fields}
l_mats = BlockStencilMatrixLocalBasis(trials, tests, terminal_expr, dim, tag)
g_mats = BlockStencilMatrixGlobalBasis(trials, tests, pads, m_tests, terminal_expr, l_mats.tag)
# ...........................................................................................
if quad_order is not None:
ind_quad = index_quad.set_range(stop=Tuple(*quad_order))
else:
ind_quad = index_quad.set_range(stop=quad_length)
ind_element = index_element.set_range(stop=el_length)
if mapping_space:
ind_dof_test = index_dof_test.set_range(stop=Tuple(*[d+1 for d in list(d_mapping.values())[0]['degrees']]))
# ...........................................................................................
eval_mapping = EvalMapping(ind_quad, ind_dof_test, list(d_mapping.values())[0]['global'],
mapping, geo, mapping_space, nderiv, mask, is_rational_mapping)
eval_fields = []
for f in fields:
f_ex = expand([f])
coeffs = [CoefficientBasis(i) for i in f_ex]
l_coeffs = [MatrixLocalBasis(i) for i in f_ex]
ind_dof_test = index_dof_test.set_range(stop=lengths_fields[f]+1)
eval_field = EvalField(atomic_expr_field[f], ind_quad, ind_dof_test, d_fields[f]['global'],
coeffs, l_coeffs, g_coeffs[f], [f], mapping, nderiv, mask)
eval_fields += [eval_field]
g_stmts = []
if mapping_space:
g_stmts.append(eval_mapping)
g_stmts += [*eval_fields]
g_stmts_texpr = []
# sort tests and trials by their space type
test_groups = regroup(tests)
trial_groups = regroup(trials)
# expand every VectorFunction into IndexedVectorFunctions
ex_tests = expand(tests)
ex_trials = expand(trials)
#=========================================================begin kernel======================================================
for _, sub_tests in test_groups:
for _, sub_trials in trial_groups:
tests_indices = [ex_tests.index(i) for i in expand(sub_tests)]
trials_indices = [ex_trials.index(i) for i in expand(sub_trials)]
sub_terminal_expr = terminal_expr[tests_indices,trials_indices]
if is_zero(sub_terminal_expr):
continue
q_basis_tests = OrderedDict((v,d_tests[v]['global']) for v in sub_tests)
q_basis_trials = OrderedDict((u,d_trials[u]['global']) for u in sub_trials)
m_tests = OrderedDict((v,d_tests[v]['multiplicity']) for v in sub_tests)
m_trials = OrderedDict((u,d_trials[u]['multiplicity']) for u in sub_trials)
tests_degree = OrderedDict((v,d_tests[v]['degrees']) for v in sub_tests)
trials_degrees = OrderedDict((u,d_trials[u]['degrees']) for u in sub_trials)
bs = OrderedDict()
es = OrderedDict()
for v in sub_tests:
v_str = str(SymbolicExpr(v))
bs[v] = variables(('b_{v}_1, b_{v}_2, b_{v}_3'.format(v=v_str)), dtype='int')[:dim] if is_parallel else [S.Zero]*dim
es[v] = variables(('e_{v}_1, e_{v}_2, e_{v}_3'.format(v=v_str)), dtype='int')[:dim] if is_parallel else [S.Zero]*dim
if all(a==1 for a in m_tests[sub_tests[0]]+m_trials[sub_trials[0]]):
stmts = []
for v in sub_tests+sub_trials:
stmts += construct_logical_expressions(v, nderiv)
l_sub_mats = BlockStencilMatrixLocalBasis(sub_trials, sub_tests, sub_terminal_expr, dim, l_mats.tag,
tests_degree=tests_degree, trials_degree=trials_degrees,
tests_multiplicity=m_tests, trials_multiplicity=m_trials)
# Instructions needed to retrieve the precomputed values of the
# fields (and their derivatives) at a single quadrature point
stmts += flatten([eval_field.inits for eval_field in eval_fields])
loop = Loop((l_quad, *q_basis_tests.values(), *q_basis_trials.values(), geo), ind_quad, stmts=stmts, mask=mask)
loop = Reduce('+', ComputeKernelExpr(sub_terminal_expr, weights=False), ElementOf(l_sub_mats), loop)
# ... loop over trials
length = Tuple(*[d+1 for d in trials_degrees[sub_trials[0]]])
ind_dof_trial = index_dof_trial.set_range(stop=length)
loop1 = Loop((), ind_dof_trial, [loop])
# ... loop over tests
length = Tuple(*[d+1 for d in tests_degree[sub_tests[0]]])
ends = Tuple(*[d+1-e for d,e in zip(tests_degree[sub_tests[0]], es[sub_tests[0]])])
starts = Tuple(*bs[sub_tests[0]])
ind_dof_test = index_dof_test.set_range(start=starts, stop=ends, length=length)
loop = Loop((), ind_dof_test, [loop1])
# ...
body = (Reset(l_sub_mats), loop)
stmts = Block(body)
g_stmts += [stmts]
if is_parallel:
ln = Tuple(*[d-1 for d in tests_degree[sub_tests[0]]])
start_expr = TensorMax(TensorMul(TensorAdd(TensorMul(ind_element, Tuple(*[-1]*dim)), ln), Tuple(*b0s)),Tuple(*[S.Zero]*dim))
start_expr = TensorAssignExpr(Tuple(*bs[sub_tests[0]]), start_expr)
end_expr = TensorMax(TensorMul(TensorAdd(TensorMul(Tuple(*[-1]*dim), ind_element.length), TensorAdd(ind_element, Tuple(*tests_degree[sub_tests[0]]))), Tuple(*e0s)), Tuple(*[S.Zero]*dim))
end_expr = TensorAssignExpr(Tuple(*es[sub_tests[0]]), end_expr)
g_stmts_texpr += [start_expr, end_expr]
else:
l_stmts = []
mask_inner = [[False, True] for i in range(dim)]
for mask_inner_i in product(*mask_inner):
mask_inner_i = Tuple(*mask_inner_i)
not_mask_inner_i = Tuple(*[not i for i in mask_inner_i])
stmts = []
for v in sub_tests+sub_trials:
stmts += construct_logical_expressions(v, nderiv)
# Instructions needed to retrieve the precomputed values of the
# fields (and their derivatives) at a single quadrature point
stmts += flatten([eval_field.inits for eval_field in eval_fields])
multiplicity = Tuple(*m_tests[sub_tests[0]])
length = Tuple(*[(d+1)%m if T else (d+1)//m for d,m,T in zip(tests_degree[sub_tests[0]], multiplicity, mask_inner_i)])
ind_outer_dof_test = index_outer_dof_test.set_range(stop=length)
outer = Tuple(*[d//m for d,m in zip(tests_degree[sub_tests[0]], multiplicity)])
outer = TensorAdd(TensorMul(ind_outer_dof_test, not_mask_inner_i),TensorMul(outer, mask_inner_i))
l_sub_mats = BlockStencilMatrixLocalBasis(sub_trials, sub_tests, sub_terminal_expr, dim, l_mats.tag, outer=outer,
tests_degree=tests_degree, trials_degree=trials_degrees,
tests_multiplicity=m_tests, trials_multiplicity=m_trials)
loop = Loop((l_quad, *q_basis_tests.values(), *q_basis_trials.values(), geo), ind_quad, stmts=stmts, mask=mask)
loop = Reduce('+', ComputeKernelExpr(sub_terminal_expr, weights=False), ElementOf(l_sub_mats), loop)
# ... loop over trials
length_t = Tuple(*[d+1 for d in trials_degrees[sub_trials[0]]])
ind_dof_trial = index_dof_trial.set_range(stop=length_t)
loop = Loop((), ind_dof_trial, [loop])
rem_length = Tuple(*[(d+1)-(d+1)%m for d,m in zip(tests_degree[sub_tests[0]], multiplicity)])
ind_inner_dof_test = index_inner_dof_test.set_range(stop=multiplicity)
expr1 = TensorAdd(TensorMul(ind_outer_dof_test, multiplicity),ind_inner_dof_test)
expr2 = TensorAdd(rem_length, ind_outer_dof_test)
expr = TensorAssignExpr(index_dof_test, TensorAdd(TensorMul(expr1,not_mask_inner_i),TensorMul(expr2, mask_inner_i)))
loop = Loop((expr,), ind_inner_dof_test, [loop], mask=mask_inner_i)
loop = Loop((), ind_outer_dof_test, [loop])
l_stmts += [loop]
g_stmts += [Reset(l_sub_mats), *l_stmts]
#=========================================================end kernel=========================================================
# ... loop over global elements
loop = Loop((g_quad, *g_span.values(), *m_span.values(), *f_span.values(), *g_stmts_texpr),
ind_element, stmts=g_stmts, mask=mask)
body = [Reduce('+', l_mats, g_mats, loop)]
# ...
args = OrderedDict()
args['tests_basis'] = tuple(d_tests[v]['global'] for v in tests)
args['trial_basis'] = tuple(d_trials[u]['global'] for u in trials)
args['spans'] = g_span.values()
args['quads'] = g_quad
args['tests_degrees'] = lengths_tests
args['trials_degrees'] = lengths_trials
args['quads_degree'] = lengths
args['global_pads'] = pads
args['local_pads'] = Pads(tests, trials)
args['mats'] = [l_mats, g_mats]
if mapping_space:
args['mapping'] = eval_mapping.coeffs
args['mapping_degrees'] = LengthDofTest(list(d_mapping.keys())[0])
args['mapping_basis'] = list(d_mapping.values())[0]['global']
args['mapping_spans'] = list(d_mapping.values())[0]['span']
if fields:
args['f_span'] = f_span.values()
args['f_coeffs'] = flatten(list(g_coeffs.values()))
args['field_basis'] = tuple(d_fields[f]['global'] for f in fields)
args['fields_degrees'] = lengths_fields.values()
args['f_pads'] = [f.pads for f in eval_fields]
fields = tuple(f.base if isinstance(f, IndexedVectorFunction) else f for f in fields)
args['fields'] = tuple(dict.fromkeys(fields))
if constants:
args['constants'] = constants
args['starts'] = b0s
args['ends'] = e0s
local_vars = []
node = DefNode('assembly', args, local_vars, body, 'bilinearform')
return node | 0929f83f1cfcc6424b00d5b931017ec5af6ffaee | 15,251 |
import six
import sys
import os
def get_package_for_module(module):
"""Get package name for a module.
Helper calculates the package name of a module.
Args:
module: Module to get name for. If module is a string, try to find
module in sys.modules.
Returns:
If module contains 'package' attribute, uses that as package name.
Else, if module is not the '__main__' module, the module __name__.
Else, the base name of the module file name. Else None.
"""
if isinstance(module, six.string_types):
try:
module = sys.modules[module]
except KeyError:
return None
try:
return six.text_type(module.package)
except AttributeError:
if module.__name__ == '__main__':
try:
file_name = module.__file__
except AttributeError:
pass
else:
base_name = os.path.basename(file_name)
split_name = os.path.splitext(base_name)
if len(split_name) == 1:
return six.text_type(base_name)
else:
return u'.'.join(split_name[:-1])
return six.text_type(module.__name__) | 0914a6f2018a046fc13589081976f2e1a67a803f | 15,252 |
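# Usage sketch for get_package_for_module (not part of the original snippet).
import six
print(get_package_for_module('six'))                # -> 'six' (falls back to __name__)
print(get_package_for_module('not.a.real.module'))  # -> None (not found in sys.modules)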
import math
def asen(x):
"""
El arcoseno de un número.
El resultado está expresado en radianes.
.. math::
\\arcsin(x)
Args:
x (float): Argumento.
Returns:
El ángulo expresado en radianes.
"""
return math.asin(x) | c52f7fc504c1eb02eb240378b14b19b0752c7299 | 15,253 |
from collections import namedtuple
def get_mock_response(status_code: int, reason: str, text: str):
"""
Return mock response.
:param status_code: An int representing status_code.
:param reason: A string to represent reason.
:param text: A string to represent text.
:return: MockResponse object.
"""
MockResponse = namedtuple("MockResponse", ["status_code", "reason", "text"])
mock_response = MockResponse(status_code, reason, text)
return mock_response | e1743755c64796e5644a00e26414fc16c110c1b6 | 15,254 |
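# Usage sketch for get_mock_response (not part of the original snippet).
resp = get_mock_response(404, "Not Found", "missing resource")
assert resp.status_code == 404 and resp.reason == "Not Found"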
import traceback
from types import FrameType, TracebackType
from typing import Callable
# Assumed alias: in the original module, `StackFilter` is a frame -> bool predicate.
StackFilter = Callable[[FrameType], bool]
def get_user_stack_depth(tb: TracebackType, f: StackFilter) -> int:
"""Determines the depth of the stack within user-code.
Takes a 'StackFilter' function that filters frames by whether
they are in user code or not and returns the number of frames
in the traceback that are within user code.
The return value can be negated for use with the limit argument
to functions in the traceback module.
"""
depth = 0
for s, _ in traceback.walk_tb(tb):
if depth or f(s):
depth += 1
return depth | e02f1ca3ee6aeb765a09806ecded5919a28b5df0 | 15,255 |
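# Usage sketch for get_user_stack_depth (not part of the original snippet); run as a
# script so that __file__ identifies the "user code" frames.
import sys
def _demo():
    raise ValueError("boom")
try:
    _demo()
except ValueError:
    tb = sys.exc_info()[2]
    print(get_user_stack_depth(tb, lambda frame: frame.f_code.co_filename == __file__))  # -> 2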
def unused(attr):
"""
This function check if an attribute is not set (has no value in it).
"""
if attr is None:
return True
else:
return False | febc225f3924fdb9de6cfbf7eba871cce5b6e374 | 15,256 |
import numpy as np
import pandas as pd
# _load_pipelines_df and OUTPUT_DIR are module-level helpers from the original project.
def compute_npipelines_xgbrf_5_6():
"""Compute the total number of XGB/RF pipelines evaluated"""
df = _load_pipelines_df()
npipelines_rf = np.sum(df['pipeline'].str.contains('random_forest'))
npipelines_xgb = np.sum(df['pipeline'].str.contains('xgb'))
total = npipelines_rf + npipelines_xgb
result = pd.DataFrame(
[npipelines_rf, npipelines_xgb, total],
index=['RF', 'XGB', 'total'],
columns=['pipelines']
)
fn = OUTPUT_DIR.joinpath('5_6_npipelines_xgbrf.csv')
result.to_csv(fn)
return result | 7e7b9ea536564b4796dcf9eea6866a8c64ce0c4e | 15,257 |
def get_evaluate_SLA(SLA_terms, topology, evaluate_individual):
"""Generate a function to evaluate if the flow reliability and latency requirements are met
Args:
SLA_terms {SLA} -- an SLA object containing latency and bandwidth requirements
topology {Topology} -- the reference topology object for the flow
evaluate_individual {function}: a cost function, which returns the metric for a given individual
individual {DEAP individual (list)} -- the individual
Returns:
evaluate_SLA {Function}: a function returning True if the requirements are met, False otherwise
"""
def evaluate_SLA(individual):
evaluation = evaluate_individual(individual)
if evaluation[3] > SLA_terms.latency or evaluation[1] > 1:
return False
return True
return evaluate_SLA | 81fdaa07e3fc21066ab734bef0cc71457d40fb5b | 15,258 |
def latest_consent(user, research_study_id):
"""Lookup latest valid consent for user
:param user: subject of query
:param research_study_id: limit query to respective value
If latest consent for user is 'suspended' or 'deleted', this function
will return None. See ``consent_withdrawal_dates()`` for that need.
:returns: the most recent consent based on given criteria, or None
if no match is located
"""
# consents are ordered desc(acceptance_date)
for consent in user.valid_consents:
if consent.research_study_id != research_study_id:
continue
if consent.status == 'consented':
return consent
return None | 2295b592a0c1fdaf3b1ed21e065f39e73a4bb622 | 15,259 |
import numpy as np
import pandas as pd
def microarray():
""" Fake microarray dataframe
"""
data = np.arange(9).reshape(3, 3)
cols = pd.Series(range(3), name='sample_id')
ind = pd.Series([1058685, 1058684, 1058683], name='probe_id')
return pd.DataFrame(data, columns=cols, index=ind) | 7bca3cf21f2942819c62c597af8761ec04fa91ba | 15,260 |
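# Usage sketch for microarray (not part of the original snippet).
df = microarray()
print(df.loc[1058684, 1])  # -> 4 (probe 1058684, sample_id 1)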
from typing import Tuple
def find_next_tag(template: str, pointer: int, left_delimiter: str) -> Tuple[str, int]:
"""Find the next tag, and the literal between current pointer and that tag"""
split_index = template.find(left_delimiter, pointer)
if split_index == -1:
return (template[pointer:], len(template))
return (template[pointer:split_index], split_index) | 82d091ef6738ffbe93e8ea8a0096161fc359e9cb | 15,261 |
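# Usage sketch for find_next_tag (not part of the original snippet).
literal, pos = find_next_tag("Hello {{name}}, welcome!", 0, "{{")
print(literal, pos)  # -> 'Hello ' 6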
def hasNLines(N,filestr):
"""returns true if the filestr has at least N lines and N periods (~sentences)"""
lines = 0
periods = 0
for line in filestr:
lines = lines+1
periods = periods + len(line.split('.'))-1
if lines >= N and periods >= N:
            return True
    return False | d75c4d241d7c4364c410f2dbae06f1c4d439b14e | 15,262
import numpy as np
import cvxpy as cp
# Note: is_extreme_point is a helper from the original module (not shown here).
def CAMNS_LP(xs, N, lptol=1e-8, exttol=1e-8, verbose=True):
"""
Solve CAMNS problem via reduction to Linear Programming
Arguments:
----------
xs : np.ndarray of shape (M, L)
Observation matrix consisting of M observations
N : int
Number of observations
lptol : float
Tolerance for Linear Programming problem
exttol : float
Tolerance for extreme point check
verbose : bool
Whether to print information about progress
Returns:
--------
np.ndarray of shape (N, L)
Estimated source matrix
"""
M, L = xs.shape # Extract dimensions
xs = xs.T
d = np.mean(xs, axis=1, keepdims=True)
C, _, _ = np.linalg.svd(xs - d, full_matrices=False)
C = C[:, :(N - 1)] # Truncate the redundant one
# Step 1. Preparing variables
B = np.diag(np.ones(L))
l = 0 # Number of extracted sources
S = np.zeros((0, L)) # Source matrix
epoch = 1
while l < N:
if verbose:
print("Epoch {}:".format(epoch))
print("=" * 58)
epoch += 1
# Step 2. Choosing random vector and generating direction r
w = np.random.randn(L)
r = B @ w
# Step 3. Solving linear programming problems using CVXPY
alpha1_star = cp.Variable(C.shape[1])
alpha2_star = cp.Variable(C.shape[1])
problem1 = cp.Problem(cp.Minimize(
r.T @ (C @ alpha1_star)), [C @ alpha1_star + d.flatten() >= 0])
problem2 = cp.Problem(cp.Maximize(
r.T @ (C @ alpha2_star)), [C @ alpha2_star + d.flatten() >= 0])
if verbose:
print("\tLaunching LP solver 1")
p_star = problem1.solve()
if verbose:
print("\tLaunching LP solver 2")
q_star = problem2.solve()
if verbose:
print("\tLP solvers have finished, checking results")
alpha1_star = np.expand_dims(alpha1_star.value, axis=1)
alpha2_star = np.expand_dims(alpha2_star.value, axis=1)
s1 = C @ alpha1_star + d
s2 = C @ alpha2_star + d
# Step 4. Checking results (with augmentations from MATLAB implementation)
if l == 0:
if is_extreme_point(C, alpha1_star, d, exttol):
S = np.append(S, [s1.squeeze()], axis=0)
if is_extreme_point(C, alpha2_star, d, exttol):
S = np.append(S, [s2.squeeze()], axis=0)
else:
if np.abs(p_star) / (np.linalg.norm(r) * np.linalg.norm(s1)) >= lptol:
if is_extreme_point(C, alpha1_star, d, exttol):
S = np.append(S, [s1.squeeze()], axis=0)
if np.abs(q_star) / (np.linalg.norm(r) * np.linalg.norm(s2)) >= lptol:
if is_extreme_point(C, alpha2_star, d, exttol):
S = np.append(S, [s2.squeeze()], axis=0)
# Step 5. Updating l
l = S.shape[0]
if verbose:
print("\tRetrieved {}/{} sources\n".format(l, N))
# Step 6. Updating B
Q1, R1 = np.linalg.qr(S.T)
B = np.diag(np.ones(L)) - Q1 @ Q1.T
# Step 7 is kinda implicit, as it is hidden in the loop condition
# Yay, we're done!
return S | e7f0416e0fa6949e50341b7a0009e574ecf6b0be | 15,263 |
def hamiltonian_c(n_max, in_w, e, d):
"""apply tridiagonal real Hamiltonian matrix to a complex vector
Parameters
----------
n_max : int
maximum n for cutoff
in_w : np.array(complex)
state in
d : np.array(complex)
diagonal elements of Hamiltonian
e : np.array(complex)
off diagonal elements of Hamiltonian
Returns
-------
out_w : np.array(complex)
application of Hamiltonian to vector
"""
n_max = int(n_max)
out_w = in_w[:n_max]*d[:n_max]
out_w[:(n_max-1)] += e[:(n_max-1)]*in_w[1:n_max]
out_w[1:n_max] += e[:n_max-1] * in_w[:n_max-1]
return out_w | 9b78d86592622100322d7a4ec031c1bd531ca51a | 15,264 |
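# Usage sketch for hamiltonian_c (not part of the original snippet): apply a small
# tridiagonal Hamiltonian (diagonal d, off-diagonal e) to a complex state vector.
import numpy as np
n = 4
d = np.array([1.0, 2.0, 3.0, 4.0], dtype=complex)   # diagonal elements
e = np.array([0.5, 0.5, 0.5], dtype=complex)         # off-diagonal elements
psi = np.ones(n, dtype=complex)
print(hamiltonian_c(n, psi, e, d))  # -> [1.5, 3., 4., 4.5]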
from random import randint
def unique_badge():
    """ keep trying until a new random badge number has been found to return """
    rando = str(randint(1000000000, 9999999999))
    badge = User.query.filter_by(badge=rando).first()
    print("rando badge query = {}".format(badge))
    if badge:
        # badge already taken: recurse and return the new candidate instead of the duplicate
        return unique_badge()
    return rando | 64a60dd420516bdc08a8ac2102b83e0cf92086ef | 15,265
from talib import MIDPRICE
def mid_price(high, low, timeperiod: int = 14):
"""Midpoint Price over period 期间中点价格
:param high:
:param low:
:param timeperiod:
:return:
"""
return MIDPRICE(high, low, timeperiod) | 7092d057da86b12b10da6928367aee705e14569a | 15,266 |
import pickle
def load_pyger_pickle(filename):
""" Load pyger data from pickle file back into object compatible with pyger plotting methods
:param filename: File name of pickled output from calc_constraints()
This is only meant to be used to read in the initial constraints object produced by
    calc_constraints(), not the cooldown data produced by calc_constraints2(). The data produced
by calc_constraints2() should be able to be read in with a simple pickle.load() function.
"""
class saved_pyger_data(object):
def __init__(self, pickled_constraint):
for key in pickled_constraint:
self.__dict__.update({key:pickled_constraint[key]})
rawdata = pickle.load(open(filename,'rb'))
pyger_compatible_data = {}
for name in list(rawdata.keys()):
constraint = saved_pyger_data(rawdata[name])
pyger_compatible_data.update({name:constraint})
return pyger_compatible_data | 23f4d4f2e3cae514ed65d62035277417c9b246a8 | 15,267 |
import sys
import os
def absPath(myPath):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
return os.path.join(base_path, os.path.basename(myPath))
except Exception:
base_path = os.path.abspath(os.path.dirname(__file__))
return os.path.join(base_path, myPath) | d1e17deb0c2adcea5b630950b4347dac9daa6f47 | 15,268 |
from typing import OrderedDict
def createitemdict(index, tf2info):
"""Take a TF2 item and return a custom dict with a limited number of
keys that are used for search"""
item = tf2info.items[index]
name = item['item_name']
classes = tf2api.getitemclasses(item)
attributes = tf2api.getitemattributes(item,
tf2info.attributes, tf2info.effects)
storeprice = tf2api.getstoreprice(item, tf2info.storeprices)
backpackprice = tf2api.getmarketprice(item, tf2info.backpackprices)
tags = tf2api.getitemtags(item)
# Sort blueprints by crafting chance
blueprint = sorted(tf2info.blueprints[index],
key=lambda k: k['chance'], reverse=True)
description = ''
if 'bundle' in tags and storeprice:
descriptions = tf2info.bundles[index]['descriptions']
text = []
items = []
for i in range(len(descriptions)):
key = str(i)
value = descriptions[key]['value']
if value in tf2info.itemsbyname:
items.append(value)
else:
text.append(value)
description = '{}---{}'.format('\n'.join(text), '\n'.join(items))
elif 'item_description' in item:
description = item['item_description']
if 'bundle' in tags and name in tf2info.itemsets:
description += '---' + '\n'.join(tf2info.itemsets[name]['items'])
levels = OrderedDict.fromkeys(
str(item[i]) for i in ('min_ilevel', 'max_ilevel'))
level = 'Level {} {}'.format('-'.join(levels), item['item_type_name'])
image, image_large = (url and url.replace(
'http://media.steampowered.com', 'https://steamcdn-a.akamaihd.net'
) for url in (item['image_url'], item['image_url_large']))
itemdict = {'index': index,
'name': name,
'image': image,
'image_large': image_large,
'description': description,
'level': level,
'attributes': attributes,
'classes': classes,
'tags': tags,
'storeprice': storeprice,
'marketprice': {'backpack.tf': backpackprice},
'blueprints': blueprint}
if 'paint' in tags:
paintvalue = item['attributes'][0]['value']
# Ignore Paint Tool
if paintvalue != 0:
itemdict['image'] = itemdict['image_large'] = (
'/images/paints/Paint_Can_{}.png'.format(paintvalue))
return itemdict | 9f9eceb588c7dc031bab633eadc139095806d38a | 15,269 |
def port_list(request, board_id):
"""Get ports attached to a board."""
return iotronicclient(request).port.list() | 0fcf7fc4db60678c7e5ec4606e9b12174966912f | 15,270 |
import urllib
import os
import fnmatch
import mimetypes
def _get_archive(url, mode='r', opts=None):
"""Get archive plugin for given URL."""
if opts is None:
opts = {}
logger.debug('readdata._get_archive: url %s' % url)
url_tuple = urllib.parse.urlsplit(url, scheme="file")
if os.name == 'nt' and \
url_tuple.scheme == 'file' and \
fnmatch.fnmatch(url_tuple.netloc, '[A-Za-z]:\\*'):
# Windows: Parse without /x:, then re-attach drive letter
_path = url_tuple.netloc
else:
_path = url_tuple.path
# url_tuple = urllib.parse.urlsplit(url, scheme='file')
mimetype = mimetypes.guess_type(_path)[0]
archive = imagedata.archives.find_mimetype_plugin(
mimetype,
url,
mode,
opts=opts)
logger.debug('readdata._get_archive: _mimetypes %s' % mimetype)
logger.debug('readdata._get_archive: archive %s' % archive.name)
return archive | 5ae0b2ba5867ae99f4a07666365fef6084334a0c | 15,271 |
import numpy as np
def pure_python_npairs_per_object_3d(sample1, sample2, rbins, period=None):
    """Brute-force reference pair counter: for each point in ``sample1``, count the
    points of ``sample2`` closer than each entry of ``rbins``, with optional periodic
    boundary conditions of box length ``period``.
    """
if period is None:
xperiod, yperiod, zperiod = np.inf, np.inf, np.inf
else:
xperiod, yperiod, zperiod = period, period, period
npts1, npts2, num_rbins = len(sample1), len(sample2), len(rbins)
counts = np.zeros((npts1, num_rbins), dtype=int)
for i in range(npts1):
for j in range(npts2):
dx = sample1[i, 0] - sample2[j, 0]
dy = sample1[i, 1] - sample2[j, 1]
dz = sample1[i, 2] - sample2[j, 2]
if dx > xperiod/2.:
dx = xperiod - dx
elif dx < -xperiod/2.:
dx = -(xperiod + dx)
if dy > yperiod/2.:
dy = yperiod - dy
elif dy < -yperiod/2.:
dy = -(yperiod + dy)
if dz > zperiod/2.:
dz = zperiod - dz
elif dz < -zperiod/2.:
dz = -(zperiod + dz)
d = np.sqrt(dx*dx + dy*dy + dz*dz)
for irbin, r in enumerate(rbins):
if d < r:
counts[i, irbin] += 1
return counts | 98b45bbbf50eea9e4dfa39cfd9093ec6fc0c0459 | 15,272 |
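# Usage sketch for pure_python_npairs_per_object_3d (not part of the original snippet):
# count pairs within radial bins for a few random points in a periodic unit box.
import numpy as np
rng = np.random.RandomState(0)
sample1 = rng.uniform(0, 1, size=(5, 3))
sample2 = rng.uniform(0, 1, size=(8, 3))
rbins = np.array([0.1, 0.2, 0.3])
counts = pure_python_npairs_per_object_3d(sample1, sample2, rbins, period=1.0)
print(counts.shape)  # (5, 3): cumulative pair counts per point in sample1, per bin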
import numpy as np
def cal_aic(X, y_pred, centers, weight=None):
"""Ref: https://en.wikipedia.org/wiki/Akaike_information_criterion
"""
if weight is None:
weight = np.ones(X.shape[0], dtype=X.dtype)
para_num = centers.shape[0] * (X.shape[1] + 1)
return cal_log_likelihood(X, y_pred, centers, weight) - para_num | fd6f7019dcd6aec7efb21ff159541cee0e56bdfb | 15,273 |
def get_gid(cfg, groupname):
"""
[description]
gets and returns the GID for a given groupname
[parameter info]
required:
cfg: the config object. useful everywhere
groupname: the name of the group we want to find the GID for
[return value]
returns an integer representing the GID of the group if successful
returns False if unsuccessful
"""
# validate/construct/get the realm.site_id.domain data
fqgn = mothership.validate.v_get_fqn(cfg, name=groupname)
groupname, realm, site_id, domain = mothership.validate.v_split_fqn(cfg, fqgn)
# gather group data
g = cfg.dbsess.query(Groups).\
filter(Groups.groupname==groupname).\
filter(Groups.site_id==site_id).\
filter(Groups.realm==realm).first()
if g:
return g.gid
else:
return False | ee139abfe8904de1983e505db7bf882580768080 | 15,274 |
from typing import Set
from typing import Dict
from typing import Any
def _elements_from_data(
edge_length: float,
edge_width: float,
layers: Set[TemperatureName],
logger: Logger,
portion_covered: float,
pvt_data: Dict[Any, Any],
x_resolution: int,
y_resolution: int,
) -> Any:
"""
Returns mapping from element coordinate to element based on the input data.
:param edge_length:
The maximum length of an edge element along the top and bottom edges of the
panel, measured in meters.
:param edge_width:
The maximum width of an edge element along the side edges of the panel, measured
in meters.
:param layers:
The `set` of layers to include in the system.
:param logger:
The :class:`logging.Logger` logger instance used for the run.
:param portion_covered:
The portion of the PVT absorber that is covered with PV cells. The uncovered
section is mapped as solar absorber only with glazing as appropriate.
:param pvt_data:
The raw PVT data, extracted from the data file.
:param x_resolution:
The x resolution for the run.
:param y_resolution:
The y resolution for the run.
:return:
A mapping between the element coordinates and the element for all elements
within the panel.
"""
# * If 1x1, warn that 1x1 resolution is depreciated and should not really be used.
if x_resolution == 1 and y_resolution == 1:
logger.warn(
"Running the system at a 1x1 resolution is depreciated. Consider running "
"at a higher resolution."
)
return {
element.ElementCoordinates(0, 0): element.Element(
TemperatureName.absorber in layers,
TemperatureName.glass in layers,
pvt_data["pvt_collector"]["length"],
True,
TemperatureName.pv in layers,
TemperatureName.upper_glass in layers,
pvt_data["pvt_collector"]["width"],
0,
0,
0,
)
}
# Extract the necessary parameters from the system data.
try:
number_of_pipes = pvt_data["absorber"]["number_of_pipes"]
except KeyError as e:
raise MissingParametersError(
"Element", "The number of pipes attached to the absorber must be supplied."
) from None
try:
panel_length = pvt_data["pvt_collector"]["length"]
except KeyError as e:
raise MissingParametersError(
"Element", "PVT panel length must be supplied."
) from None
try:
panel_width = pvt_data["pvt_collector"]["width"]
except KeyError as e:
raise MissingParametersError(
"Element", "PVT panel width must be supplied."
) from None
try:
bond_width = pvt_data["bond"]["width"]
except KeyError as e:
raise MissingParametersError(
"Element", "Collector-to-pipe bond width must be supplied."
) from None
# * Determine the spacing between the pipes.
pipe_spacing = (x_resolution - number_of_pipes) / (number_of_pipes + 1)
if int(pipe_spacing) != pipe_spacing:
raise InvalidParametersError(
"The resolution supplied results in an uneven pipe distribution.",
"pipe_spcaing",
)
# * Determine the indicies of elements that have pipes attached.
pipe_positions = list(
range(int(pipe_spacing), x_resolution - 2, int(pipe_spacing) + 1)
)
# Determine whether the width of the elements is greater than or less than the edge
# width and adjust accordingly.
nominal_element_width: float = (
panel_width - number_of_pipes * bond_width - 2 * edge_width
) / (x_resolution - number_of_pipes - 2)
if nominal_element_width < edge_width:
nominal_element_width = (panel_width - number_of_pipes * bond_width) / (
x_resolution - number_of_pipes
)
edge_width = nominal_element_width
# Likewise, determine whether the nominal element height is greater than the edge
# height and adjust accordingly.
nominal_element_length: float = (panel_length - 2 * edge_length) / (
y_resolution - 2
)
if nominal_element_length < edge_length:
nominal_element_length = panel_length / y_resolution
edge_length = nominal_element_length
# * Instantiate the array of elements.
# Construct the elemented array based on the arguments.
pv_coordinate_cutoff = int(y_resolution * portion_covered)
try:
elements = {
element.ElementCoordinates(
x_coordinate(element_number, x_resolution),
y_coordinate(element_number, x_resolution),
): element.Element(
absorber=TemperatureName.absorber in layers,
glass=TemperatureName.glass in layers,
length=edge_length
if y_coordinate(element_number, x_resolution) in {0, y_resolution - 1}
else nominal_element_length,
pipe=x_coordinate(element_number, x_resolution) in pipe_positions
if TemperatureName.pipe in layers
else False,
pv=y_coordinate(element_number, x_resolution) <= pv_coordinate_cutoff
if TemperatureName.pv in layers
else False,
upper_glass=TemperatureName.upper_glass in layers,
# Use the edge with if the element is an edge element.
width=edge_width
if x_coordinate(element_number, x_resolution) in {0, x_resolution - 1}
# Otherwise, use the bond width if the element is a pipe element.
else bond_width
if x_coordinate(element_number, x_resolution) in pipe_positions
# Otherwise, use the nominal element width.
else nominal_element_width,
x_index=x_coordinate(element_number, x_resolution),
y_index=y_coordinate(element_number, x_resolution),
pipe_index=pipe_positions.index(
x_coordinate(element_number, x_resolution)
)
if x_coordinate(element_number, x_resolution) in pipe_positions
else None,
)
for element_number in range(x_resolution * y_resolution)
}
except KeyError as e:
raise MissingParametersError(
"PVT", f"Missing parameters when instantiating the PV-T system: {str(e)}"
) from None
return elements | 80bef4fc80a22da823365fcdc756b6e35d19cdf2 | 15,275 |
def GetControllers(wing_serial):
"""Returns control gain matrices for any kite serial number."""
if wing_serial == m.kWingSerial01:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.209, -0.209, 0.0, 0.0, 0.009, 0.009, -0.005, 0.017]
)
longitudinal_gains_min_airspeed = (
[[0.005, 0.034, -0.716, -0.333, 0.311],
[-9.239, -68.061, 1361.046, 641.777, -589.016]]
)
longitudinal_gains_nominal_airspeed = (
[[0.014, 0.013, -0.509, -0.168, 0.316],
[-6.676, -6.529, 234.939, 80.993, -147.915]]
)
longitudinal_gains_max_airspeed = (
[[0.009, 0.007, -0.401, -0.136, 0.316],
[-1.965, -1.585, 79.966, 28.908, -65.259]]
)
lateral_gains_min_airspeed = (
[[1.477, -1.589, -0.434, 0.296, -0.75, 0.329],
[0.224, 1.045, 0.065, -0.554, -0.429, -0.282],
[-18215.48, -42217.142, -2192.239, 28689.136, 25162.461, 12500.22]]
)
lateral_gains_nominal_airspeed = (
[[1.063, -1.48, -0.234, 0.195, -0.772, 0.317],
[0.38, 1.123, 0.036, -0.386, -0.609, -0.376],
[-6604.64, -11507.484, -340.275, 5156.255, 9047.472, 4427.592]]
)
lateral_gains_max_airspeed = (
[[0.982, -1.395, -0.198, 0.149, -0.786, 0.309],
[0.27, 1.107, 0.027, -0.287, -0.613, -0.391],
[-2275.783, -4917.11, -119.56, 1730.983, 4062.059, 2033.279]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.587, 0.004, 0.11], [-0.03, -6.079, -0.026], [0.243, 0.006, -1.06]]
)
elif wing_serial == m.kWingSerial04Hover:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.211, -0.211, 0.0, 0.0, 0.011, 0.011, 0.005, 0.008]
)
longitudinal_gains_min_airspeed = (
[[-0.002, 0.033, -0.73, -0.357, 0.311],
[4.546, -64.827, 1390.055, 688.515, -589.338]]
)
longitudinal_gains_nominal_airspeed = (
[[0.011, 0.014, -0.548, -0.182, 0.316],
[-5.284, -6.728, 254.985, 87.909, -148.241]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.441, -0.144, 0.316],
[-1.676, -1.659, 89.199, 30.631, -65.417]]
)
lateral_gains_min_airspeed = (
[[1.44, -1.617, -0.409, 0.296, -0.743, 0.333],
[0.254, 1.044, 0.06, -0.551, -0.432, -0.277],
[-19794.459, -43094.943, -1997.427, 28857.76, 25564.594, 12475.401]]
)
lateral_gains_nominal_airspeed = (
[[1.036, -1.502, -0.224, 0.194, -0.768, 0.32],
[0.433, 1.136, 0.033, -0.39, -0.614, -0.374],
[-7324.836, -11932.75, -305.45, 5272.765, 9170.382, 4461.79]]
)
lateral_gains_max_airspeed = (
[[0.956, -1.415, -0.192, 0.148, -0.783, 0.311],
[0.323, 1.123, 0.025, -0.291, -0.617, -0.389],
[-2588.374, -5128.587, -109.03, 1771.109, 4110.266, 2055.664]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.92, -0.002, 0.128],
[-0.027, -5.506, -0.024],
[0.252, 0.005, -1.083]]
)
elif wing_serial == m.kWingSerial04Crosswind:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.206, -0.206, 0.0, 0.0, 0.006, 0.006, 0.004, 0.009]
)
longitudinal_gains_min_airspeed = (
[[-0.004, 0.033, -0.743, -0.369, 0.311],
[6.879, -65.952, 1416.656, 711.871, -589.814]]
)
longitudinal_gains_nominal_airspeed = (
[[0.011, 0.014, -0.562, -0.187, 0.316],
[-5.048, -6.949, 262.084, 90.532, -148.346]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.451, -0.146, 0.316],
[-1.652, -1.714, 91.319, 31.157, -65.442]]
)
lateral_gains_min_airspeed = (
[[1.473, -1.597, -0.421, 0.294, -0.746, 0.331],
[0.237, 1.043, 0.061, -0.559, -0.431, -0.28],
[-19140.086, -42177.964, -1957.253, 29074.862, 25405.542, 12435.884]]
)
lateral_gains_nominal_airspeed = (
[[1.063, -1.486, -0.228, 0.193, -0.769, 0.319],
[0.403, 1.124, 0.033, -0.391, -0.613, -0.374],
[-6972.675, -11547.427, -303.445, 5257.572, 9146.686, 4416.334]]
)
lateral_gains_max_airspeed = (
[[0.983, -1.401, -0.195, 0.148, -0.782, 0.311],
[0.294, 1.108, 0.025, -0.291, -0.617, -0.389],
[-2439.793, -4940.323, -108.619, 1761.451, 4108.271, 2029.644]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.721, -0.001, 0.125],
[-0.028, -5.332, -0.022],
[0.243, 0.005, -1.048]]
)
elif wing_serial == m.kWingSerial05Hover:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.204, -0.204, 0.0, 0.0, 0.004, 0.004, 0.004, 0.007]
)
longitudinal_gains_min_airspeed = (
[[-0.003, 0.033, -0.732, -0.353, 0.311],
[5.756, -65.225, 1393.028, 681.0, -589.458]]
)
longitudinal_gains_nominal_airspeed = (
[[0.011, 0.014, -0.552, -0.181, 0.316],
[-5.157, -6.823, 257.066, 87.46, -148.262]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.445, -0.143, 0.316],
[-1.655, -1.692, 90.117, 30.558, -65.423]]
)
lateral_gains_min_airspeed = (
[[1.438, -1.611, -0.401, 0.289, -0.743, 0.333],
[0.257, 1.041, 0.058, -0.542, -0.432, -0.278],
[-20011.52, -42735.847, -1914.014, 28358.023, 25584.42, 12448.614]]
)
lateral_gains_nominal_airspeed = (
[[1.043, -1.502, -0.221, 0.191, -0.767, 0.32],
[0.428, 1.13, 0.032, -0.383, -0.614, -0.374],
[-7288.823, -11800.514, -296.679, 5172.453, 9185.489, 4445.84]]
)
lateral_gains_max_airspeed = (
[[0.965, -1.415, -0.191, 0.146, -0.782, 0.311],
[0.318, 1.117, 0.024, -0.286, -0.617, -0.389],
[-2567.285, -5064.437, -106.454, 1742.745, 4117.291, 2047.19]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-6.043, -0.014, 0.134],
[-0.027, -5.618, -0.024],
[0.257, 0.005, -1.105]]
)
elif wing_serial == m.kWingSerial05Crosswind:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.198, -0.198, 0.0, 0.0, -0.002, -0.002, 0.003, 0.009]
)
longitudinal_gains_min_airspeed = (
[[-0.004, 0.033, -0.744, -0.364, 0.311],
[7.876, -66.499, 1418.317, 702.426, -589.905]]
)
longitudinal_gains_nominal_airspeed = (
[[0.01, 0.014, -0.565, -0.186, 0.316],
[-4.942, -7.054, 263.793, 89.867, -148.357]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.454, -0.145, 0.316],
[-1.635, -1.75, 92.08, 31.033, -65.443]]
)
lateral_gains_min_airspeed = (
[[1.47, -1.591, -0.412, 0.286, -0.746, 0.331],
[0.24, 1.039, 0.059, -0.549, -0.431, -0.281],
[-19344.869, -41752.487, -1867.667, 28478.098, 25425.604, 12404.153]]
)
lateral_gains_nominal_airspeed = (
[[1.07, -1.485, -0.226, 0.189, -0.768, 0.32],
[0.397, 1.117, 0.033, -0.383, -0.613, -0.374],
[-6919.209, -11394.187, -294.167, 5138.956, 9160.95, 4397.605]]
)
lateral_gains_max_airspeed = (
[[0.993, -1.401, -0.193, 0.145, -0.782, 0.312],
[0.287, 1.101, 0.025, -0.285, -0.618, -0.389],
[-2410.981, -4866.463, -105.87, 1728.008, 4114.679, 2019.74]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.857, -0.012, 0.131],
[-0.03, -5.457, -0.022],
[0.249, 0.005, -1.072]]
)
elif wing_serial == m.kWingSerial06Hover:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.208, -0.208, 0.0, 0.0, 0.008, 0.008, 0.006, 0.007]
)
longitudinal_gains_min_airspeed = (
[[-0.003, 0.032, -0.731, -0.358, 0.311],
[6.453, -64.539, 1392.121, 689.765, -589.371]]
)
longitudinal_gains_nominal_airspeed = (
[[0.011, 0.014, -0.553, -0.183, 0.316],
[-5.088, -6.779, 257.684, 88.435, -148.279]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.447, -0.144, 0.316],
[-1.637, -1.678, 90.467, 30.782, -65.435]]
)
lateral_gains_min_airspeed = (
[[1.438, -1.616, -0.408, 0.296, -0.742, 0.333],
[0.255, 1.044, 0.059, -0.552, -0.432, -0.277],
[-19907.663, -43108.523, -1968.711, 28927.246, 25591.178, 12468.239]]
)
lateral_gains_nominal_airspeed = (
[[1.038, -1.503, -0.224, 0.194, -0.768, 0.32],
[0.435, 1.136, 0.033, -0.391, -0.614, -0.374],
[-7364.944, -11935.606, -300.999, 5287.24, 9178.769, 4462.368]]
)
lateral_gains_max_airspeed = (
[[0.958, -1.416, -0.192, 0.148, -0.783, 0.311],
[0.325, 1.123, 0.024, -0.291, -0.617, -0.389],
[-2605.535, -5129.038, -107.775, 1775.087, 4114.053, 2056.295]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.912, -0.009, 0.13],
[-0.025, -5.494, -0.024],
[0.252, 0.005, -1.081]]
)
elif wing_serial == m.kWingSerial06Crosswind:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.203, -0.203, 0.0, 0.0, 0.003, 0.003, 0.004, 0.008]
)
longitudinal_gains_min_airspeed = (
[[-0.004, 0.033, -0.743, -0.369, 0.311],
[8.412, -65.841, 1417.073, 711.786, -589.819]]
)
longitudinal_gains_nominal_airspeed = (
[[0.01, 0.014, -0.566, -0.188, 0.316],
[-4.888, -7.008, 264.204, 90.884, -148.372]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.455, -0.146, 0.316],
[-1.62, -1.735, 92.313, 31.262, -65.454]]
)
lateral_gains_min_airspeed = (
[[1.471, -1.596, -0.42, 0.293, -0.746, 0.331],
[0.239, 1.043, 0.06, -0.559, -0.431, -0.28],
[-19231.343, -42149.313, -1926.317, 29079.197, 25426.856, 12427.585]]
)
lateral_gains_nominal_airspeed = (
[[1.065, -1.487, -0.228, 0.193, -0.769, 0.319],
[0.404, 1.123, 0.033, -0.391, -0.613, -0.374],
[-6992.628, -11534.142, -299.093, 5258.12, 9152.573, 4415.616]]
)
lateral_gains_max_airspeed = (
[[0.986, -1.402, -0.194, 0.148, -0.782, 0.311],
[0.294, 1.108, 0.025, -0.29, -0.617, -0.389],
[-2447.327, -4933.324, -107.393, 1761.417, 4110.821, 2029.552]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.725, -0.008, 0.128],
[-0.027, -5.331, -0.022],
[0.243, 0.005, -1.048]]
)
elif wing_serial == m.kWingSerial07Hover:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.211, -0.211, 0.0, 0.0, 0.011, 0.011, 0.005, 0.008]
)
longitudinal_gains_min_airspeed = (
[[-0.002, 0.033, -0.73, -0.357, 0.311],
[4.546, -64.827, 1390.055, 688.515, -589.338]]
)
longitudinal_gains_nominal_airspeed = (
[[0.011, 0.014, -0.548, -0.182, 0.316],
[-5.284, -6.728, 254.985, 87.909, -148.241]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.441, -0.144, 0.316],
[-1.676, -1.659, 89.199, 30.631, -65.417]]
)
lateral_gains_min_airspeed = (
[[1.44, -1.617, -0.409, 0.296, -0.743, 0.333],
[0.254, 1.044, 0.06, -0.551, -0.432, -0.277],
[-19794.459, -43094.943, -1997.427, 28857.76, 25564.594, 12475.401]]
)
lateral_gains_nominal_airspeed = (
[[1.036, -1.502, -0.224, 0.194, -0.768, 0.32],
[0.433, 1.136, 0.033, -0.39, -0.614, -0.374],
[-7324.836, -11932.75, -305.45, 5272.765, 9170.382, 4461.79]]
)
lateral_gains_max_airspeed = (
[[0.956, -1.415, -0.192, 0.148, -0.783, 0.311],
[0.323, 1.123, 0.025, -0.291, -0.617, -0.389],
[-2588.374, -5128.587, -109.03, 1771.109, 4110.266, 2055.664]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.92, -0.002, 0.128],
[-0.027, -5.506, -0.024],
[0.252, 0.005, -1.083]]
)
elif wing_serial == m.kWingSerial07Crosswind:
airspeed_table = (
[30.0, 60.0, 90.0]
)
flap_offsets = (
[-0.206, -0.206, 0.0, 0.0, 0.006, 0.006, 0.004, 0.009]
)
longitudinal_gains_min_airspeed = (
[[-0.004, 0.033, -0.743, -0.369, 0.311],
[6.879, -65.952, 1416.656, 711.871, -589.814]]
)
longitudinal_gains_nominal_airspeed = (
[[0.011, 0.014, -0.562, -0.187, 0.316],
[-5.048, -6.949, 262.084, 90.532, -148.346]]
)
longitudinal_gains_max_airspeed = (
[[0.008, 0.008, -0.451, -0.146, 0.316],
[-1.652, -1.714, 91.319, 31.157, -65.442]]
)
lateral_gains_min_airspeed = (
[[1.473, -1.597, -0.421, 0.294, -0.746, 0.331],
[0.237, 1.043, 0.061, -0.559, -0.431, -0.28],
[-19140.086, -42177.964, -1957.253, 29074.862, 25405.542, 12435.884]]
)
lateral_gains_nominal_airspeed = (
[[1.063, -1.486, -0.228, 0.193, -0.769, 0.319],
[0.403, 1.124, 0.033, -0.391, -0.613, -0.374],
[-6972.675, -11547.427, -303.445, 5257.572, 9146.686, 4416.334]]
)
lateral_gains_max_airspeed = (
[[0.983, -1.401, -0.195, 0.148, -0.782, 0.311],
[0.294, 1.108, 0.025, -0.291, -0.617, -0.389],
[-2439.793, -4940.323, -108.619, 1761.451, 4108.271, 2029.644]]
)
B_flaps_to_pqr_min_airspeed = ( # pylint: disable=invalid-name
[[-5.721, -0.001, 0.125],
[-0.028, -5.332, -0.022],
[0.243, 0.005, -1.048]]
)
else:
assert False, 'wing_serial %d was not recognized' % wing_serial
return {
'airspeed_table': (
airspeed_table),
'flap_offsets': (
flap_offsets),
'longitudinal_gains_min_airspeed': (
longitudinal_gains_min_airspeed),
'longitudinal_gains_nominal_airspeed': (
longitudinal_gains_nominal_airspeed),
'longitudinal_gains_max_airspeed': (
longitudinal_gains_max_airspeed),
'lateral_gains_min_airspeed': (
lateral_gains_min_airspeed),
'lateral_gains_nominal_airspeed': (
lateral_gains_nominal_airspeed),
'lateral_gains_max_airspeed': (
lateral_gains_max_airspeed),
'B_flaps_to_pqr_min_airspeed': (
B_flaps_to_pqr_min_airspeed),
} | e9e557909cfb9a7e885f14d20948436b653f4f31 | 15,276 |
import cv2
def rotate(mat, degrees):
"""
Rotates the input image by a given number of degrees about its center.
Border pixels are extrapolated by replication.
:param mat: input image
:param degrees: number of degrees to rotate (positive is counter-clockwise)
:return: rotated image
"""
rot_mat = cv2.getRotationMatrix2D((mat.shape[1] / 2, mat.shape[0] / 2), degrees, 1)
return cv2.warpAffine(mat, rot_mat, (mat.shape[1], mat.shape[0]),
borderMode=cv2.BORDER_REPLICATE) | 6de73e2701fdad422497dd53d271accc1f039128 | 15,277 |
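# Usage sketch for rotate (not part of the original snippet): rotate a synthetic image
# 30 degrees counter-clockwise.
import numpy as np
img = np.zeros((100, 200, 3), dtype=np.uint8)
img[40:60, 80:120] = 255           # white rectangle in the middle
rotated = rotate(img, 30)
print(rotated.shape)               # (100, 200, 3) -- same size, rotated content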
def spec_defaults():
"""
Return a mapping with spec attribute defaults to ensure that the
returned results are the same on RubyGems 1.8 and RubyGems 2.0
"""
return {
'base_dir': None,
'bin_dir': None,
'cache_dir': None,
'doc_dir': None,
'gem_dir': None,
'gems_dir': None,
'ri_dir': None,
'spec_dir': None,
'spec_file': None,
'cache_file': None,
'full_gem_path': None,
'full_name': None,
'metadata': {},
'homepage': '',
'licenses': [],
'loaded_from': None,
} | 5f220168e2cc63c4572c29c17cb4192a7a5d1427 | 15,278 |
def rdict(x):
"""
recursive conversion to dictionary
converts objects in list members to dictionary recursively
"""
if isinstance(x, list):
l = [rdict(_) for _ in x]
return l
elif isinstance(x, dict):
x2 = {}
for k, v in x.items():
x2[k] = rdict(v)
return x2
else:
if hasattr(x, '__dict__'):
d = x.__dict__
toremove = []
for k, v in d.items():
if v is None:
toremove.append(k)
else:
d[k] = rdict(v)
for k in toremove:
del(d[k])
return d
else:
return x | dd09486aa76ee1a27306510a1100502bae482015 | 15,279 |
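# Usage sketch for rdict (not part of the original snippet).
class Point(object):
    def __init__(self, x, y, label=None):
        self.x, self.y, self.label = x, y, label
print(rdict([Point(1, 2), Point(3, 4, "b")]))
# -> [{'x': 1, 'y': 2}, {'x': 3, 'y': 4, 'label': 'b'}]  (None attributes are dropped)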
import requests
from bs4 import BeautifulSoup
def get_pid(part_no):
"""Extract the PID from the part number page"""
url = 'https://product.tdk.com/en/search/capacitor/ceramic/mlcc/info?part_no=' + part_no
page = requests.get(url)
if (page.status_code != 200):
print('Error getting page({}): {}'.format(page.status_code, url))
return None
soup = BeautifulSoup(page.text, 'html.parser')
pid_input = soup.find(id='pid')
if pid_input is None:
return None
return pid_input['value'] | 8cc01b011e23d3bc972cb5552662b55ab998dba0 | 15,280 |
def verbatim_det_lcs_all(plags, psr, susp_text, src_text, susp_offsets, src_offsets, th_shortest):
"""
    DESCRIPTION: Uses longest common substring algorithm to classify a pair of documents being compared as a verbatim plagiarism candidate (the pair of documents), and removing the non-verbatim cases if positive
INPUT: plags <list of list of two tuples [(int, int), (int, int)]> - Have the plagiarism cases represented by min and max sentence index in suspicious and source document respectively
psr <list of list of tuples (int, int)> - Contains the clusters
           susp_text <string> - Suspicious document text
src_text <string> - Source document text
susp_offsets <list of tuples (int, int)> - Contain the char offset and length of each suspicious document sentence
src_offsets <list of tuples (int, int)> - Contain the char offset and length of each source document sentence
th_shortest <int> - Threshold in characters of shortest common substring allowed
OUTPUT: res_plags <list of list of two tuples [(int, int), (int, int)]> - Contains the plagiarism cases as common substrings or the same as the arguments depending on type_plag
res_psr <list of list of tuples (int, int)> - Contains the clusters with seeds present in the common substrings, or the same as the arguments depending on type_plag
type_plag <0 or 1> - 1: verbatim plagiarism case 0: Other plagiarism case
res_long_frag <list> - Contains the lengths of common substrings
"""
#plags [[(susp_ini, susp_end), (src_ini, src_end)], ...]
res_plags = []
res_psr = []
res_long_frag = []
i = 0
type_plag = 0 #0: Unknown, 1: no-obfuscation
#print 'Plags:', len(plags)
while i < len(plags): #For each plagiarism case
#print 'Case',i
#print 'Plag case', plags[i]
#print 'Seeds', psr[i]
#sentences in seeds an those not in seeds
res2 = common_substring_pro_all(susp_text[susp_offsets[plags[i][0][0]][0] : susp_offsets[plags[i][0][1]][0] + susp_offsets[plags[i][0][1]][1]], src_text[src_offsets[plags[i][1][0]][0] : src_offsets[plags[i][1][1]][0] + src_offsets[plags[i][1][1]][1]], th_shortest)
res = []
#Remove overlapping
for tup_i in res2:
flag = 0
for tup_j in res2:
if tup_i != tup_j and tup_i[2] >= tup_j[2] and tup_i[3] <= tup_j[3]:
flag = 1
break
if flag == 0:
res.append(tup_i)
#print 'Res2', res2
#print 'Res', res
#max_len = max([res[1] - res[0], res[3] - res[2]])
#max_len = [(x[1] - x[0], x[3] - x[2]) for x in res]
if len(res) > 0:
if type_plag == 1:
#print max_len, True, 'Removing seeds with lcs shorter than', th_shortest
for sub_case in res:
res_plags.append([(susp_offsets[plags[i][0][0]][0] + sub_case[0], susp_offsets[plags[i][0][0]][0] + sub_case[1]), (src_offsets[plags[i][1][0]][0] + sub_case[2], src_offsets[plags[i][1][0]][0] + sub_case[3])])
res_psr.append(psr[i])
res_long_frag.append(max([sub_case[1] - sub_case[0], sub_case[3] - sub_case[2]]))
else:
#print max_len, 'Type 02-no-obfuscation detected. Starting over!'
#print max_len, 'Type 02-no-obfuscation detected. Removing previously added cases!'
type_plag = 1
res_plags = []
res_psr = []
res_long_frag = []
for sub_case in res:
res_plags.append([(susp_offsets[plags[i][0][0]][0] + sub_case[0], susp_offsets[plags[i][0][0]][0] + sub_case[1]), (src_offsets[plags[i][1][0]][0] + sub_case[2], src_offsets[plags[i][1][0]][0] + sub_case[3])])
res_psr.append(psr[i])
res_long_frag.append(max([sub_case[1] - sub_case[0], sub_case[3] - sub_case[2]]))
#i = -1
else:
if type_plag != 1:
#print max_len, False, 'Adding'
res_plags.append(plags[i])
res_psr.append(psr[i])
res_long_frag.append(-1)
#else:
#print max_len, False, 'Removing case because 02-no-obfuscation was detected'
i += 1
return res_plags, res_psr, type_plag, res_long_frag | d233f3745bdd458fe65cbbdbc056c8cca611d755 | 15,281 |
import multiprocessing
import logging
import multiprocessing.dummy as m
import multiprocessing as m
import itertools
def autopooler(n,
it,
*a,
chunksize=1,
dummy=False,
return_iter=False,
unordered=False,
**ka):
"""Uses multiprocessing.Pool or multiprocessing.dummy.Pool to run iterator in parallel.
Parameters
------------
n: int
Number of parallel processes. Set to 0 to use auto detected CPU count.
it: iterator of (function,tuple,dict)
Each iteration computes **function**\ (\*\ **tuple**\ ,\*\*\ **dict**\ ). **function** must be picklable, i.e. a base level function in a module or file.
a: tuple
Arguments passed to Pool.
chunksize: int
Number of iterations passed to each process each time.
dummy: bool
Whether to use multiprocessing.dummy instead
return_iter: bool
Not Implemented. Whether to return iterator of results instead. If not, return list of results.
unordered: bool
Whether the order of output matters.
ka: dict
Keyword arguments passed to Pool
Returns
----------
list (or iterator if return_iter) of any
Results returned by function(\*tuple,\*\*dict), in same order of the iterator if not unordered.
"""
    if dummy:
        import multiprocessing.dummy as m
    else:
        import multiprocessing as m
if n == 0:
n = autocount()
logging.info('Using {} threads'.format(n))
if n == 1:
ans = map(autopooler_caller, it)
if not return_iter:
ans = list(ans)
assert len(ans) > 0
else:
# Catches iterator errors (only if occurs at the first), and emptiness
it = itertools.chain([next(it)], it)
with m.Pool(n, *a, **ka) as p:
if unordered:
ans = p.imap_unordered(autopooler_caller, it, chunksize)
else:
ans = p.imap(autopooler_caller, it, chunksize)
if not return_iter:
ans = list(ans)
else:
raise NotImplementedError
return ans | 489426a16977b632dd16fe351eee167c7eb5fb0d | 15,282 |
def grow_population(initial, days_to_grow):
"""
Track the fish population growth from an initial population, growing over days_to_grow number of days.
To make this efficient two optimizations have been made:
1. Instead of tracking individual fish (which doubles every approx. 8 days which will result O(10^9)
fish over 256 days), we instead compute the sum of fish with the same due date and use the due date
as the offset into the current popluation list. For example, if 5 fish have a timer of 1 and 2 fish
have a timer of 4 the population would be tracked as: [0, 5, 0, 0, 2, 0, 0, 0, 0]
2. Modulo arithmetic is used instead of fully iterating through the entire list to decrement the due
date of each fish every day. Using modula arithmetic provides a projection into the fish data that
looks like its changing each day without needing O(n) operations and instead we can update the list
in constant time regardless of the number of different ages for fish.
"""
current = list(initial)
if days_to_grow == 0:
return current
for day in range(0, days_to_grow):
due_index = day % 9
due_count = current[due_index]
current[(day+7)%9] += due_count
current[(day+9)%9] += due_count
current[due_index] = max(0, current[due_index] - due_count)
return current | 88b8283e5c1e6de19acb76278ef16d9d6b94de00 | 15,283 |
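# Usage sketch for grow_population (not part of the original snippet): the classic
# lanternfish example with timers 3,4,3,1,2, bucketed by days-until-spawn (index 0..8).
initial = [0, 1, 1, 2, 1, 0, 0, 0, 0]
after_18 = grow_population(initial, 18)
print(sum(after_18))   # total number of fish after 18 days -> 26 for this example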
def get_QBrush():
    """QBrush getter."""
    try:
        import PySide.QtGui as QtGui
        return QtGui.QBrush
    except ImportError:
        import PyQt5.QtGui as QtGui
        return QtGui.QBrush | 548226da434077ee1d0d1d2fb4a6762faf5f091d | 15,284
def apply_odata_query(query: ClauseElement, odata_query: str) -> ClauseElement:
"""
Shorthand for applying an OData query to a SQLAlchemy query.
Args:
query: SQLAlchemy query to apply the OData query to.
odata_query: OData query string.
Returns:
ClauseElement: The modified query
"""
lexer = ODataLexer()
parser = ODataParser()
model = query.column_descriptions[0]["entity"]
ast = parser.parse(lexer.tokenize(odata_query))
transformer = AstToSqlAlchemyClauseVisitor(model)
where_clause = transformer.visit(ast)
for j in transformer.join_relationships:
if str(j) not in _get_joined_attrs(query):
query = query.join(j)
return query.filter(where_clause) | 666dd05856db79ce90f29e864aeaf4188bd425d0 | 15,285 |
def get_sql(conn, data, did, tid, exid=None, template_path=None):
"""
This function will generate sql from model data.
:param conn: Connection Object
:param data: data
:param did: Database ID
:param tid: Table id
:param exid: Exclusion Constraint ID
:param template_path: Template Path
:return:
"""
name = data['name'] if 'name' in data else None
if exid is not None:
sql = render_template("/".join([template_path, 'properties.sql']),
did=did, tid=tid, cid=exid)
status, res = conn.execute_dict(sql)
if not status:
raise Exception(res)
if len(res['rows']) == 0:
raise ObjectGone(
_('Could not find the exclusion constraint in the table.'))
old_data = res['rows'][0]
if 'name' not in data:
name = data['name'] = old_data['name']
sql = render_template("/".join([template_path, 'update.sql']),
data=data, o_data=old_data)
else:
if 'columns' not in data:
return _('-- definition incomplete'), name
elif isinstance(data['columns'], list) and len(data['columns']) < 1:
return _('-- definition incomplete'), name
sql = render_template("/".join([template_path, 'create.sql']),
data=data, conn=conn)
return sql, name | 45ec23f3e061491ad87ea0a59b7e08e32e5183a2 | 15,286 |
import six
import base64
def bytes_base64(x):
# type: (AnyStr) -> bytes
"""Turn bytes into base64"""
if six.PY2:
return base64.encodestring(x).replace('\n', '') # type: ignore
return base64.encodebytes(bytes_encode(x)).replace(b'\n', b'') | 543b0f1105545cda516890d2d6f4c5a8059c4365 | 15,287 |
def is_planar_enforced(gdf):
"""Test if a geodataframe has any planar enforcement violations
    Parameters
    ----------
    gdf : GeoDataFrame
    Returns
    -------
    boolean
"""
if is_overlapping(gdf):
return False
if non_planar_edges(gdf):
return False
_holes = holes(gdf)
if _holes.shape[0] > 0:
return False
return True | 0587cd351fcc7355d0767a404e446d91f8c59d4d | 15,288 |
from uuid import UUID
def bson2uuid(bval: bytes) -> UUID:
"""Decode BSON Binary UUID as UUID."""
return UUID(bytes=bval) | 6fc81f03b6eabee3496bab6b407d6c665b001667 | 15,289 |
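# Usage sketch for bson2uuid (not part of the original snippet).
from uuid import uuid4
original = uuid4()
assert bson2uuid(original.bytes) == original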
def ape_insert_new_fex(cookie, in_device_primary_key, in_model, in_serial, in_vendor):
""" Auto-generated UCS XML API Method. """
method = ExternalMethod("ApeInsertNewFex")
method.cookie = cookie
method.in_device_primary_key = in_device_primary_key
method.in_model = in_model
method.in_serial = in_serial
method.in_vendor = in_vendor
xml_request = method.to_xml(option=WriteXmlOption.DIRTY)
return xml_request | 2d10c37f26357ac9714d0dfe91967f4029857cd5 | 15,290 |
import aiohttp
import asyncio
async def get_pool_info(address, api_url="https://rest.stargaze-apis.com/cosmos"):
"""Pool value and current rewards via rest API.
Useful links:
https://api.akash.smartnodes.one/swagger/#/
https://github.com/Smart-Nodes/endpoints
"""
rewards_url = f"{api_url}/distribution/v1beta1/delegators/{ADDRESS}/rewards"
delegated_url = f"{api_url}/staking/v1beta1/delegations/{ADDRESS}"
async with aiohttp.ClientSession() as session:
rewards_data, pool_data = await asyncio.gather(
gather_json(session, rewards_url), gather_json(session, delegated_url)
)
rewards = float(rewards_data["rewards"][0]["reward"][0]["amount"]) / 1_000_000
pool_value = (
float(pool_data["delegation_responses"][0]["balance"]["amount"]) / 1_000_000
)
return pool_value, rewards | 34c54c840ed3a412002b99f798c23f495e1eb75d | 15,291 |
import tensorflow as tf
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
# Convert any PNG to JPEG's for consistency.
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width | 111025b6dddcf7380fd912a84154b551df4be5f3 | 15,292 |
import numpy as np
def mnist_reader(numbers):
"""
Read MNIST dataset with specific numbers you needed
:param numbers: A list of number from 0 - 9 as you needed
:return: A tuple of a numpy array with specific numbers MNIST training dataset,
labels of the training set and the length of the training dataset.
"""
# Training Data
    f = open('./data/train-images.idx3-ubyte', 'rb')
loaded = np.fromfile(file=f, dtype=np.uint8)
trainX = loaded[16:].reshape((60000, 28, 28, 1)).astype(np.float32) / 127.5 - 1
    f = open('./data/train-labels.idx1-ubyte', 'rb')
loaded = np.fromfile(file=f, dtype=np.uint8)
trainY = loaded[8:].reshape((60000)).astype(np.int32)
_trainX = []
for idx in range(0, len(trainX)):
if trainY[idx] in numbers:
_trainX.append(trainX[idx])
return np.array(_trainX), trainY, len(_trainX) | 627a7fd41047383cd5869fe83efea2c2b0e2d25a | 15,293 |
import six
def _ensure_list(alist): # {{{
"""
Ensure that variables used as a list are actually lists.
"""
# Authors
# -------
# Phillip J. Wolfram, Xylar Asay-Davis
if isinstance(alist, six.string_types):
# print 'Warning, converting %s to a list'%(alist)
alist = [alist]
return alist | bd8115dad627f4553ded17757bfb838cfdb0200b | 15,294 |
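# Usage sketch for _ensure_list (not part of the original snippet).
print(_ensure_list('temperature'))   # -> ['temperature']
print(_ensure_list(['u', 'v']))      # -> ['u', 'v'] (unchanged)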
def _parse_einsum_input(operands):
"""Parses einsum operands.
This function is based on `numpy.core.einsumfunc._parse_einsum_input`
function in NumPy 1.14.
Returns
-------
input_strings : str
Parsed input strings
output_string : str
Parsed output string
operands : list of array_like
The operands to use in the numpy contraction
Examples
--------
The operand list is simplified to reduce printing:
>>> a = np.random.rand(4, 4)
>>> b = np.random.rand(4, 4, 4)
>>> _parse_einsum_input(('...a,...a->...', a, b))
('@a,@a', '@', [a, b])
>>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
('@a,@a', '@', [a, b])
"""
if not operands:
raise ValueError('No input operands')
if isinstance(operands[0], str):
subscripts = operands[0].replace(' ', '')
operands = operands[1:]
# Ensure all characters are valid
for s in subscripts:
if s in '.,->':
continue
if s not in einsum_symbols:
raise ValueError('Character %s is not a valid symbol.' % s)
# Check for proper "->"
if ('-' in subscripts) or ('>' in subscripts):
if any((
subscripts.count('-') > 1,
subscripts.count('>') > 1,
subscripts.count('->') != 1,
)):
raise ValueError('Subscripts can only contain one \'->\'.')
# Parse "..."
subscripts = subscripts.replace('...', '@')
if '.' in subscripts:
raise ValueError('Invalid Ellipses.')
else:
tmp_operands = list(operands)
operand_list = []
subscript_list = []
for p in range(len(operands) // 2):
operand_list.append(tmp_operands.pop(0))
subscript_list.append(tmp_operands.pop(0))
output_list = tmp_operands[-1] if len(tmp_operands) else None
operands = operand_list
subscripts = ''
last = len(subscript_list) - 1
for num, sub in enumerate(subscript_list):
for s in sub:
if s is Ellipsis:
subscripts += '@'
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError('For this input type lists must contain '
'either int or Ellipsis')
if num != last:
subscripts += ','
if output_list is not None:
subscripts += '->'
for s in output_list:
if s is Ellipsis:
subscripts += '@'
elif isinstance(s, int):
subscripts += einsum_symbols[s]
else:
raise TypeError('For this input type lists must contain '
'either int or Ellipsis')
# Build output string if does not exist
if '->' in subscripts:
input_subscripts, output_subscript = subscripts.split('->')
# Make sure output subscripts are in the input
for char in output_subscript:
if char not in input_subscripts:
raise ValueError(
'Output character %s did not appear in the input'
% ('...' if char == '@' else char))
else:
input_subscripts = subscripts
# Build output subscripts
tmp_subscripts = subscripts.replace(',', '')
output_subscript = ''
for s in sorted(set(tmp_subscripts)):
if s == '@' or tmp_subscripts.count(s) == 1:
output_subscript += s
# Make sure number operands is equivalent to the number of terms
if len(input_subscripts.split(',')) != len(operands):
raise ValueError('Number of einsum subscripts must be equal to the '
'number of operands.')
return input_subscripts, output_subscript, operands | 8c95c3d842a29fa637e6190e006638420b8a0d83 | 15,295 |
import tensorflow as tf
def convert_to_numpy(*args, **kwargs):
"""
Converts all tf tensors in args and kwargs to numpy array
Parameters
----------
*args :
positional arguments of arbitrary number and type
**kwargs :
keyword arguments of arbitrary number and type
Returns
-------
list
converted positional arguments
dict
converted keyboard arguments
"""
args = recursively_convert_elements(args, tf.Tensor,
_single_element_tensor_conversion)
kwargs = recursively_convert_elements(kwargs, tf.Tensor,
_single_element_tensor_conversion)
return convert_to_numpy_identity(*args, **kwargs) | 8059832fc4841b4cb96dcc77e96dd354dba399c2 | 15,296 |
from fastapi import Depends, Response, status
# Resource, get_json_resource and SirixServerError come from the surrounding application.
async def delete_contact(
contact_key: int, hash: str, resource: Resource = Depends(get_json_resource)
):
"""
Delete the contact with the given key.
If the record has changed since the hash was obtained, a 409 error is returned.
"""
try:
await resource.delete(contact_key, hash)
except SirixServerError:
return Response(status_code=status.HTTP_409_CONFLICT)
return Response(status_code=status.HTTP_204_NO_CONTENT) | f984c5ece28ac8b58bb2d2137dcc94e2f3a7bf7c | 15,297 |
import jsonschema
def update_model_instance_meta_schema(request, file_type_id, **kwargs):
"""copies the metadata schema from the associated model program aggregation over to the model instance aggregation
"""
# Note: decorator 'authorise_for_aggregation_edit' sets the error_response key in kwargs
if 'error_response' in kwargs and kwargs['error_response']:
error_response = kwargs['error_response']
return JsonResponse(error_response, status=status.HTTP_400_BAD_REQUEST)
# Note: decorator 'authorise_for_aggregation_edit' sets the logical_file key in kwargs
logical_file = kwargs['logical_file']
metadata = logical_file.metadata
if not metadata.executed_by:
msg = "No associated model program was found"
error_response = {"status": "error", "message": msg}
return JsonResponse(error_response, status=status.HTTP_400_BAD_REQUEST)
elif not metadata.executed_by.metadata_schema_json:
msg = "Associated model program has no metadata schema"
error_response = {"status": "error", "message": msg}
return JsonResponse(error_response, status=status.HTTP_400_BAD_REQUEST)
logical_file.metadata_schema_json = metadata.executed_by.metadata_schema_json
if metadata.metadata_json:
# validate json data against metadata schema:
try:
metadata_json_schema = logical_file.metadata_schema_json
jsonschema.Draft4Validator(metadata_json_schema).validate(metadata.metadata_json)
except jsonschema.ValidationError as ex:
# delete existing invalid metadata
metadata.metadata_json = {}
logical_file.save()
metadata.is_dirty = True
metadata.save()
resource = logical_file.resource
resource_modified(resource, request.user, overwrite_bag=False)
ajax_response_data = {'status': 'success', 'logical_file_type': logical_file.type_name(),
'element_name': 'metadata_schema_json', 'message': "Update was successful"}
return JsonResponse(ajax_response_data, status=status.HTTP_200_OK) | c6f67f2f6386065919239f7d868797d97aec6874 | 15,298 |
import numpy as np
def _calculate_permutation_scores_per_col(estimator, X, y, sample_weight, col_idx,
random_state, n_repeats, scorer):
"""Calculate score when `col_idx` is permuted."""
random_state = check_random_state(random_state)
    # Work on a copy of X to ensure thread-safety in case of threading based
# parallelism. Furthermore, making a copy is also useful when the joblib
# backend is 'loky' (default) or the old 'multiprocessing': in those cases,
# if X is large it will be automatically be backed by a readonly memory map
# (memmap). X.copy() on the other hand is always guaranteed to return a
# writable data-structure whose columns can be shuffled inplace.
X_permuted = X.copy()
scores = np.zeros(n_repeats)
shuffling_idx = np.arange(X.shape[0])
for n_round in range(n_repeats):
random_state.shuffle(shuffling_idx)
if hasattr(X_permuted, "iloc"):
col = X_permuted.iloc[shuffling_idx, col_idx]
col.index = X_permuted.index
X_permuted.iloc[:, col_idx] = col
else:
X_permuted[:, col_idx] = X_permuted[shuffling_idx, col_idx]
feature_score = _weights_scorer(
scorer, estimator, X_permuted, y, sample_weight
)
scores[n_round] = feature_score
return scores | 52c49ac3e4fd53490af04c9d862b506214e08f95 | 15,299 |